Diffstat (limited to 'thirdparty/bullet/BulletCollision/BroadphaseCollision'): 21 files changed, 3259 insertions, 3517 deletions
diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.cpp index 77763305b1..ec6fe9f4d8 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.cpp @@ -2,7 +2,6 @@ //Bullet Continuous Collision Detection and Physics Library //Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ - // // btAxisSweep3 // @@ -19,18 +18,15 @@ // 3. This notice may not be removed or altered from any source distribution. #include "btAxisSweep3.h" - -btAxisSweep3::btAxisSweep3(const btVector3& worldAabbMin,const btVector3& worldAabbMax, unsigned short int maxHandles, btOverlappingPairCache* pairCache, bool disableRaycastAccelerator) -:btAxisSweep3Internal<unsigned short int>(worldAabbMin,worldAabbMax,0xfffe,0xffff,maxHandles,pairCache,disableRaycastAccelerator) +btAxisSweep3::btAxisSweep3(const btVector3& worldAabbMin, const btVector3& worldAabbMax, unsigned short int maxHandles, btOverlappingPairCache* pairCache, bool disableRaycastAccelerator) + : btAxisSweep3Internal<unsigned short int>(worldAabbMin, worldAabbMax, 0xfffe, 0xffff, maxHandles, pairCache, disableRaycastAccelerator) { // 1 handle is reserved as sentinel btAssert(maxHandles > 1 && maxHandles < 32767); - } - -bt32BitAxisSweep3::bt32BitAxisSweep3(const btVector3& worldAabbMin,const btVector3& worldAabbMax, unsigned int maxHandles , btOverlappingPairCache* pairCache , bool disableRaycastAccelerator) -:btAxisSweep3Internal<unsigned int>(worldAabbMin,worldAabbMax,0xfffffffe,0x7fffffff,maxHandles,pairCache,disableRaycastAccelerator) +bt32BitAxisSweep3::bt32BitAxisSweep3(const btVector3& worldAabbMin, const btVector3& worldAabbMax, unsigned int maxHandles, btOverlappingPairCache* pairCache, bool disableRaycastAccelerator) + : btAxisSweep3Internal<unsigned int>(worldAabbMin, worldAabbMax, 0xfffffffe, 0x7fffffff, maxHandles, pairCache, disableRaycastAccelerator) { // 1 handle is reserved as sentinel btAssert(maxHandles > 1 && maxHandles < 2147483647); diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.h index a3648df1af..1e42f25f3b 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3.h @@ -33,9 +33,7 @@ class btAxisSweep3 : public btAxisSweep3Internal<unsigned short int> { public: - - btAxisSweep3(const btVector3& worldAabbMin,const btVector3& worldAabbMax, unsigned short int maxHandles = 16384, btOverlappingPairCache* pairCache = 0, bool disableRaycastAccelerator = false); - + btAxisSweep3(const btVector3& worldAabbMin, const btVector3& worldAabbMax, unsigned short int maxHandles = 16384, btOverlappingPairCache* pairCache = 0, bool disableRaycastAccelerator = false); }; /// The bt32BitAxisSweep3 allows higher precision quantization and more objects compared to the btAxisSweep3 sweep and prune. 
@@ -44,9 +42,7 @@ public: class bt32BitAxisSweep3 : public btAxisSweep3Internal<unsigned int> { public: - - bt32BitAxisSweep3(const btVector3& worldAabbMin,const btVector3& worldAabbMax, unsigned int maxHandles = 1500000, btOverlappingPairCache* pairCache = 0, bool disableRaycastAccelerator = false); - + bt32BitAxisSweep3(const btVector3& worldAabbMin, const btVector3& worldAabbMax, unsigned int maxHandles = 1500000, btOverlappingPairCache* pairCache = 0, bool disableRaycastAccelerator = false); }; #endif diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3Internal.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3Internal.h index 323aa96dca..2ee35528fd 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3Internal.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btAxisSweep3Internal.h @@ -36,172 +36,158 @@ template <typename BP_FP_INT_TYPE> class btAxisSweep3Internal : public btBroadphaseInterface { protected: - - BP_FP_INT_TYPE m_bpHandleMask; - BP_FP_INT_TYPE m_handleSentinel; + BP_FP_INT_TYPE m_bpHandleMask; + BP_FP_INT_TYPE m_handleSentinel; public: - - BT_DECLARE_ALIGNED_ALLOCATOR(); + BT_DECLARE_ALIGNED_ALLOCATOR(); class Edge { public: - BP_FP_INT_TYPE m_pos; // low bit is min/max + BP_FP_INT_TYPE m_pos; // low bit is min/max BP_FP_INT_TYPE m_handle; - BP_FP_INT_TYPE IsMax() const {return static_cast<BP_FP_INT_TYPE>(m_pos & 1);} + BP_FP_INT_TYPE IsMax() const { return static_cast<BP_FP_INT_TYPE>(m_pos & 1); } }; public: - class Handle : public btBroadphaseProxy + class Handle : public btBroadphaseProxy { public: - BT_DECLARE_ALIGNED_ALLOCATOR(); - + BT_DECLARE_ALIGNED_ALLOCATOR(); + // indexes into the edge arrays - BP_FP_INT_TYPE m_minEdges[3], m_maxEdges[3]; // 6 * 2 = 12 -// BP_FP_INT_TYPE m_uniqueId; - btBroadphaseProxy* m_dbvtProxy;//for faster raycast + BP_FP_INT_TYPE m_minEdges[3], m_maxEdges[3]; // 6 * 2 = 12 + // BP_FP_INT_TYPE m_uniqueId; + btBroadphaseProxy* m_dbvtProxy; //for faster raycast //void* m_pOwner; this is now in btBroadphaseProxy.m_clientObject - - SIMD_FORCE_INLINE void SetNextFree(BP_FP_INT_TYPE next) {m_minEdges[0] = next;} - SIMD_FORCE_INLINE BP_FP_INT_TYPE GetNextFree() const {return m_minEdges[0];} - }; // 24 bytes + 24 for Edge structures = 44 bytes total per entry - + SIMD_FORCE_INLINE void SetNextFree(BP_FP_INT_TYPE next) { m_minEdges[0] = next; } + SIMD_FORCE_INLINE BP_FP_INT_TYPE GetNextFree() const { return m_minEdges[0]; } + }; // 24 bytes + 24 for Edge structures = 44 bytes total per entry + protected: - btVector3 m_worldAabbMin; // overall system bounds - btVector3 m_worldAabbMax; // overall system bounds + btVector3 m_worldAabbMin; // overall system bounds + btVector3 m_worldAabbMax; // overall system bounds + + btVector3 m_quantize; // scaling factor for quantization - btVector3 m_quantize; // scaling factor for quantization + BP_FP_INT_TYPE m_numHandles; // number of active handles + BP_FP_INT_TYPE m_maxHandles; // max number of handles + Handle* m_pHandles; // handles pool - BP_FP_INT_TYPE m_numHandles; // number of active handles - BP_FP_INT_TYPE m_maxHandles; // max number of handles - Handle* m_pHandles; // handles pool - - BP_FP_INT_TYPE m_firstFreeHandle; // free handles list + BP_FP_INT_TYPE m_firstFreeHandle; // free handles list - Edge* m_pEdges[3]; // edge arrays for the 3 axes (each array has m_maxHandles * 2 + 2 sentinel entries) + Edge* m_pEdges[3]; // edge arrays for the 3 axes (each array has m_maxHandles * 2 + 2 sentinel entries) void* 
m_pEdgesRawPtr[3]; btOverlappingPairCache* m_pairCache; ///btOverlappingPairCallback is an additional optional user callback for adding/removing overlapping pairs, similar interface to btOverlappingPairCache. btOverlappingPairCallback* m_userPairCallback; - - bool m_ownsPairCache; - int m_invalidPair; + bool m_ownsPairCache; + + int m_invalidPair; ///additional dynamic aabb structure, used to accelerate ray cast queries. ///can be disabled using a optional argument in the constructor - btDbvtBroadphase* m_raycastAccelerator; - btOverlappingPairCache* m_nullPairCache; - + btDbvtBroadphase* m_raycastAccelerator; + btOverlappingPairCache* m_nullPairCache; // allocation/deallocation BP_FP_INT_TYPE allocHandle(); void freeHandle(BP_FP_INT_TYPE handle); - - bool testOverlap2D(const Handle* pHandleA, const Handle* pHandleB,int axis0,int axis1); + bool testOverlap2D(const Handle* pHandleA, const Handle* pHandleB, int axis0, int axis1); #ifdef DEBUG_BROADPHASE - void debugPrintAxis(int axis,bool checkCardinality=true); -#endif //DEBUG_BROADPHASE + void debugPrintAxis(int axis, bool checkCardinality = true); +#endif //DEBUG_BROADPHASE //Overlap* AddOverlap(BP_FP_INT_TYPE handleA, BP_FP_INT_TYPE handleB); //void RemoveOverlap(BP_FP_INT_TYPE handleA, BP_FP_INT_TYPE handleB); - - - void sortMinDown(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps ); - void sortMinUp(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps ); - void sortMaxDown(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps ); - void sortMaxUp(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps ); + void sortMinDown(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps); + void sortMinUp(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps); + void sortMaxDown(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps); + void sortMaxUp(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps); public: + btAxisSweep3Internal(const btVector3& worldAabbMin, const btVector3& worldAabbMax, BP_FP_INT_TYPE handleMask, BP_FP_INT_TYPE handleSentinel, BP_FP_INT_TYPE maxHandles = 16384, btOverlappingPairCache* pairCache = 0, bool disableRaycastAccelerator = false); - btAxisSweep3Internal(const btVector3& worldAabbMin,const btVector3& worldAabbMax, BP_FP_INT_TYPE handleMask, BP_FP_INT_TYPE handleSentinel, BP_FP_INT_TYPE maxHandles = 16384, btOverlappingPairCache* pairCache=0,bool disableRaycastAccelerator = false); - - virtual ~btAxisSweep3Internal(); + virtual ~btAxisSweep3Internal(); BP_FP_INT_TYPE getNumHandles() const { return m_numHandles; } - virtual void calculateOverlappingPairs(btDispatcher* dispatcher); - - BP_FP_INT_TYPE addHandle(const btVector3& aabbMin,const btVector3& aabbMax, void* pOwner, int collisionFilterGroup, int collisionFilterMask,btDispatcher* dispatcher); - void removeHandle(BP_FP_INT_TYPE handle,btDispatcher* dispatcher); - void updateHandle(BP_FP_INT_TYPE handle, const btVector3& aabbMin,const btVector3& aabbMax,btDispatcher* dispatcher); - SIMD_FORCE_INLINE Handle* getHandle(BP_FP_INT_TYPE index) const {return m_pHandles + index;} + virtual void calculateOverlappingPairs(btDispatcher* dispatcher); + + BP_FP_INT_TYPE addHandle(const btVector3& aabbMin, const btVector3& aabbMax, void* pOwner, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher); + void removeHandle(BP_FP_INT_TYPE handle, btDispatcher* 
dispatcher); + void updateHandle(BP_FP_INT_TYPE handle, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* dispatcher); + SIMD_FORCE_INLINE Handle* getHandle(BP_FP_INT_TYPE index) const { return m_pHandles + index; } virtual void resetPool(btDispatcher* dispatcher); - void processAllOverlappingPairs(btOverlapCallback* callback); + void processAllOverlappingPairs(btOverlapCallback* callback); //Broadphase Interface - virtual btBroadphaseProxy* createProxy( const btVector3& aabbMin, const btVector3& aabbMax,int shapeType,void* userPtr , int collisionFilterGroup, int collisionFilterMask,btDispatcher* dispatcher); - virtual void destroyProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher); - virtual void setAabb(btBroadphaseProxy* proxy,const btVector3& aabbMin,const btVector3& aabbMax,btDispatcher* dispatcher); - virtual void getAabb(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const; - - virtual void rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin=btVector3(0,0,0), const btVector3& aabbMax = btVector3(0,0,0)); - virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback); - - + virtual btBroadphaseProxy* createProxy(const btVector3& aabbMin, const btVector3& aabbMax, int shapeType, void* userPtr, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher); + virtual void destroyProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher); + virtual void setAabb(btBroadphaseProxy* proxy, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* dispatcher); + virtual void getAabb(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const; + + virtual void rayTest(const btVector3& rayFrom, const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin = btVector3(0, 0, 0), const btVector3& aabbMax = btVector3(0, 0, 0)); + virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback); + void quantize(BP_FP_INT_TYPE* out, const btVector3& point, int isMax) const; ///unQuantize should be conservative: aabbMin/aabbMax should be larger then 'getAabb' result - void unQuantize(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const; - - bool testAabbOverlap(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1); + void unQuantize(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const; + + bool testAabbOverlap(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1); - btOverlappingPairCache* getOverlappingPairCache() + btOverlappingPairCache* getOverlappingPairCache() { return m_pairCache; } - const btOverlappingPairCache* getOverlappingPairCache() const + const btOverlappingPairCache* getOverlappingPairCache() const { return m_pairCache; } - void setOverlappingPairUserCallback(btOverlappingPairCallback* pairCallback) + void setOverlappingPairUserCallback(btOverlappingPairCallback* pairCallback) { m_userPairCallback = pairCallback; } - const btOverlappingPairCallback* getOverlappingPairUserCallback() const + const btOverlappingPairCallback* getOverlappingPairUserCallback() const { return m_userPairCallback; } ///getAabb returns the axis aligned bounding box in the 'global' coordinate frame ///will add some transform later - virtual void getBroadphaseAabb(btVector3& aabbMin,btVector3& aabbMax) const + virtual void getBroadphaseAabb(btVector3& aabbMin, btVector3& aabbMax) const { aabbMin = m_worldAabbMin; 
aabbMax = m_worldAabbMax; } - virtual void printStats() + virtual void printStats() { -/* printf("btAxisSweep3.h\n"); + /* printf("btAxisSweep3.h\n"); printf("numHandles = %d, maxHandles = %d\n",m_numHandles,m_maxHandles); printf("aabbMin=%f,%f,%f,aabbMax=%f,%f,%f\n",m_worldAabbMin.getX(),m_worldAabbMin.getY(),m_worldAabbMin.getZ(), m_worldAabbMax.getX(),m_worldAabbMax.getY(),m_worldAabbMax.getZ()); */ - } - }; //////////////////////////////////////////////////////////////////// - - - #ifdef DEBUG_BROADPHASE #include <stdio.h> @@ -209,75 +195,73 @@ template <typename BP_FP_INT_TYPE> void btAxisSweep3<BP_FP_INT_TYPE>::debugPrintAxis(int axis, bool checkCardinality) { int numEdges = m_pHandles[0].m_maxEdges[axis]; - printf("SAP Axis %d, numEdges=%d\n",axis,numEdges); + printf("SAP Axis %d, numEdges=%d\n", axis, numEdges); int i; - for (i=0;i<numEdges+1;i++) + for (i = 0; i < numEdges + 1; i++) { Edge* pEdge = m_pEdges[axis] + i; Handle* pHandlePrev = getHandle(pEdge->m_handle); - int handleIndex = pEdge->IsMax()? pHandlePrev->m_maxEdges[axis] : pHandlePrev->m_minEdges[axis]; + int handleIndex = pEdge->IsMax() ? pHandlePrev->m_maxEdges[axis] : pHandlePrev->m_minEdges[axis]; char beginOrEnd; - beginOrEnd=pEdge->IsMax()?'E':'B'; - printf(" [%c,h=%d,p=%x,i=%d]\n",beginOrEnd,pEdge->m_handle,pEdge->m_pos,handleIndex); + beginOrEnd = pEdge->IsMax() ? 'E' : 'B'; + printf(" [%c,h=%d,p=%x,i=%d]\n", beginOrEnd, pEdge->m_handle, pEdge->m_pos, handleIndex); } if (checkCardinality) - btAssert(numEdges == m_numHandles*2+1); + btAssert(numEdges == m_numHandles * 2 + 1); } -#endif //DEBUG_BROADPHASE +#endif //DEBUG_BROADPHASE template <typename BP_FP_INT_TYPE> -btBroadphaseProxy* btAxisSweep3Internal<BP_FP_INT_TYPE>::createProxy( const btVector3& aabbMin, const btVector3& aabbMax,int shapeType,void* userPtr, int collisionFilterGroup, int collisionFilterMask,btDispatcher* dispatcher) +btBroadphaseProxy* btAxisSweep3Internal<BP_FP_INT_TYPE>::createProxy(const btVector3& aabbMin, const btVector3& aabbMax, int shapeType, void* userPtr, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher) { - (void)shapeType; - BP_FP_INT_TYPE handleId = addHandle(aabbMin,aabbMax, userPtr,collisionFilterGroup,collisionFilterMask,dispatcher); - - Handle* handle = getHandle(handleId); - - if (m_raycastAccelerator) - { - btBroadphaseProxy* rayProxy = m_raycastAccelerator->createProxy(aabbMin,aabbMax,shapeType,userPtr,collisionFilterGroup,collisionFilterMask,dispatcher); - handle->m_dbvtProxy = rayProxy; - } - return handle; -} + (void)shapeType; + BP_FP_INT_TYPE handleId = addHandle(aabbMin, aabbMax, userPtr, collisionFilterGroup, collisionFilterMask, dispatcher); + Handle* handle = getHandle(handleId); + if (m_raycastAccelerator) + { + btBroadphaseProxy* rayProxy = m_raycastAccelerator->createProxy(aabbMin, aabbMax, shapeType, userPtr, collisionFilterGroup, collisionFilterMask, dispatcher); + handle->m_dbvtProxy = rayProxy; + } + return handle; +} template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::destroyProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher) +void btAxisSweep3Internal<BP_FP_INT_TYPE>::destroyProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher) { Handle* handle = static_cast<Handle*>(proxy); if (m_raycastAccelerator) - m_raycastAccelerator->destroyProxy(handle->m_dbvtProxy,dispatcher); + m_raycastAccelerator->destroyProxy(handle->m_dbvtProxy, dispatcher); removeHandle(static_cast<BP_FP_INT_TYPE>(handle->m_uniqueId), dispatcher); } template <typename 
BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::setAabb(btBroadphaseProxy* proxy,const btVector3& aabbMin,const btVector3& aabbMax,btDispatcher* dispatcher) +void btAxisSweep3Internal<BP_FP_INT_TYPE>::setAabb(btBroadphaseProxy* proxy, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* dispatcher) { Handle* handle = static_cast<Handle*>(proxy); handle->m_aabbMin = aabbMin; handle->m_aabbMax = aabbMax; - updateHandle(static_cast<BP_FP_INT_TYPE>(handle->m_uniqueId), aabbMin, aabbMax,dispatcher); + updateHandle(static_cast<BP_FP_INT_TYPE>(handle->m_uniqueId), aabbMin, aabbMax, dispatcher); if (m_raycastAccelerator) - m_raycastAccelerator->setAabb(handle->m_dbvtProxy,aabbMin,aabbMax,dispatcher); - + m_raycastAccelerator->setAabb(handle->m_dbvtProxy, aabbMin, aabbMax, dispatcher); } template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback,const btVector3& aabbMin,const btVector3& aabbMax) +void btAxisSweep3Internal<BP_FP_INT_TYPE>::rayTest(const btVector3& rayFrom, const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin, const btVector3& aabbMax) { if (m_raycastAccelerator) { - m_raycastAccelerator->rayTest(rayFrom,rayTo,rayCallback,aabbMin,aabbMax); - } else + m_raycastAccelerator->rayTest(rayFrom, rayTo, rayCallback, aabbMin, aabbMax); + } + else { //choose axis? BP_FP_INT_TYPE axis = 0; //for each proxy - for (BP_FP_INT_TYPE i=1;i<m_numHandles*2+1;i++) + for (BP_FP_INT_TYPE i = 1; i < m_numHandles * 2 + 1; i++) { if (m_pEdges[axis][i].IsMax()) { @@ -288,22 +272,23 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::rayTest(const btVector3& rayFrom,cons } template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback) +void btAxisSweep3Internal<BP_FP_INT_TYPE>::aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback) { if (m_raycastAccelerator) { - m_raycastAccelerator->aabbTest(aabbMin,aabbMax,callback); - } else + m_raycastAccelerator->aabbTest(aabbMin, aabbMax, callback); + } + else { //choose axis? 
BP_FP_INT_TYPE axis = 0; //for each proxy - for (BP_FP_INT_TYPE i=1;i<m_numHandles*2+1;i++) + for (BP_FP_INT_TYPE i = 1; i < m_numHandles * 2 + 1; i++) { if (m_pEdges[axis][i].IsMax()) { Handle* handle = getHandle(m_pEdges[axis][i].m_handle); - if (TestAabbAgainstAabb2(aabbMin,aabbMax,handle->m_aabbMin,handle->m_aabbMax)) + if (TestAabbAgainstAabb2(aabbMin, aabbMax, handle->m_aabbMin, handle->m_aabbMax)) { callback.process(handle); } @@ -312,66 +297,60 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::aabbTest(const btVector3& aabbMin, co } } - - template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::getAabb(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const +void btAxisSweep3Internal<BP_FP_INT_TYPE>::getAabb(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const { Handle* pHandle = static_cast<Handle*>(proxy); aabbMin = pHandle->m_aabbMin; aabbMax = pHandle->m_aabbMax; } - template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::unQuantize(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const +void btAxisSweep3Internal<BP_FP_INT_TYPE>::unQuantize(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const { Handle* pHandle = static_cast<Handle*>(proxy); unsigned short vecInMin[3]; unsigned short vecInMax[3]; - vecInMin[0] = m_pEdges[0][pHandle->m_minEdges[0]].m_pos ; - vecInMax[0] = m_pEdges[0][pHandle->m_maxEdges[0]].m_pos +1 ; - vecInMin[1] = m_pEdges[1][pHandle->m_minEdges[1]].m_pos ; - vecInMax[1] = m_pEdges[1][pHandle->m_maxEdges[1]].m_pos +1 ; - vecInMin[2] = m_pEdges[2][pHandle->m_minEdges[2]].m_pos ; - vecInMax[2] = m_pEdges[2][pHandle->m_maxEdges[2]].m_pos +1 ; - - aabbMin.setValue((btScalar)(vecInMin[0]) / (m_quantize.getX()),(btScalar)(vecInMin[1]) / (m_quantize.getY()),(btScalar)(vecInMin[2]) / (m_quantize.getZ())); + vecInMin[0] = m_pEdges[0][pHandle->m_minEdges[0]].m_pos; + vecInMax[0] = m_pEdges[0][pHandle->m_maxEdges[0]].m_pos + 1; + vecInMin[1] = m_pEdges[1][pHandle->m_minEdges[1]].m_pos; + vecInMax[1] = m_pEdges[1][pHandle->m_maxEdges[1]].m_pos + 1; + vecInMin[2] = m_pEdges[2][pHandle->m_minEdges[2]].m_pos; + vecInMax[2] = m_pEdges[2][pHandle->m_maxEdges[2]].m_pos + 1; + + aabbMin.setValue((btScalar)(vecInMin[0]) / (m_quantize.getX()), (btScalar)(vecInMin[1]) / (m_quantize.getY()), (btScalar)(vecInMin[2]) / (m_quantize.getZ())); aabbMin += m_worldAabbMin; - - aabbMax.setValue((btScalar)(vecInMax[0]) / (m_quantize.getX()),(btScalar)(vecInMax[1]) / (m_quantize.getY()),(btScalar)(vecInMax[2]) / (m_quantize.getZ())); + + aabbMax.setValue((btScalar)(vecInMax[0]) / (m_quantize.getX()), (btScalar)(vecInMax[1]) / (m_quantize.getY()), (btScalar)(vecInMax[2]) / (m_quantize.getZ())); aabbMax += m_worldAabbMin; } - - - template <typename BP_FP_INT_TYPE> -btAxisSweep3Internal<BP_FP_INT_TYPE>::btAxisSweep3Internal(const btVector3& worldAabbMin,const btVector3& worldAabbMax, BP_FP_INT_TYPE handleMask, BP_FP_INT_TYPE handleSentinel,BP_FP_INT_TYPE userMaxHandles, btOverlappingPairCache* pairCache , bool disableRaycastAccelerator) -:m_bpHandleMask(handleMask), -m_handleSentinel(handleSentinel), -m_pairCache(pairCache), -m_userPairCallback(0), -m_ownsPairCache(false), -m_invalidPair(0), -m_raycastAccelerator(0) +btAxisSweep3Internal<BP_FP_INT_TYPE>::btAxisSweep3Internal(const btVector3& worldAabbMin, const btVector3& worldAabbMax, BP_FP_INT_TYPE handleMask, BP_FP_INT_TYPE handleSentinel, BP_FP_INT_TYPE userMaxHandles, btOverlappingPairCache* pairCache, bool 
disableRaycastAccelerator) + : m_bpHandleMask(handleMask), + m_handleSentinel(handleSentinel), + m_pairCache(pairCache), + m_userPairCallback(0), + m_ownsPairCache(false), + m_invalidPair(0), + m_raycastAccelerator(0) { - BP_FP_INT_TYPE maxHandles = static_cast<BP_FP_INT_TYPE>(userMaxHandles+1);//need to add one sentinel handle + BP_FP_INT_TYPE maxHandles = static_cast<BP_FP_INT_TYPE>(userMaxHandles + 1); //need to add one sentinel handle if (!m_pairCache) { - void* ptr = btAlignedAlloc(sizeof(btHashedOverlappingPairCache),16); - m_pairCache = new(ptr) btHashedOverlappingPairCache(); + void* ptr = btAlignedAlloc(sizeof(btHashedOverlappingPairCache), 16); + m_pairCache = new (ptr) btHashedOverlappingPairCache(); m_ownsPairCache = true; } if (!disableRaycastAccelerator) { - m_nullPairCache = new (btAlignedAlloc(sizeof(btNullPairCache),16)) btNullPairCache(); - m_raycastAccelerator = new (btAlignedAlloc(sizeof(btDbvtBroadphase),16)) btDbvtBroadphase(m_nullPairCache);//m_pairCache); - m_raycastAccelerator->m_deferedcollide = true;//don't add/remove pairs + m_nullPairCache = new (btAlignedAlloc(sizeof(btNullPairCache), 16)) btNullPairCache(); + m_raycastAccelerator = new (btAlignedAlloc(sizeof(btDbvtBroadphase), 16)) btDbvtBroadphase(m_nullPairCache); //m_pairCache); + m_raycastAccelerator->m_deferedcollide = true; //don't add/remove pairs } //btAssert(bounds.HasVolume()); @@ -382,13 +361,13 @@ m_raycastAccelerator(0) btVector3 aabbSize = m_worldAabbMax - m_worldAabbMin; - BP_FP_INT_TYPE maxInt = m_handleSentinel; + BP_FP_INT_TYPE maxInt = m_handleSentinel; - m_quantize = btVector3(btScalar(maxInt),btScalar(maxInt),btScalar(maxInt)) / aabbSize; + m_quantize = btVector3(btScalar(maxInt), btScalar(maxInt), btScalar(maxInt)) / aabbSize; // allocate handles buffer, using btAlignedAlloc, and put all handles on free list m_pHandles = new Handle[maxHandles]; - + m_maxHandles = maxHandles; m_numHandles = 0; @@ -404,14 +383,14 @@ m_raycastAccelerator(0) // allocate edge buffers for (int i = 0; i < 3; i++) { - m_pEdgesRawPtr[i] = btAlignedAlloc(sizeof(Edge)*maxHandles*2,16); - m_pEdges[i] = new(m_pEdgesRawPtr[i]) Edge[maxHandles * 2]; + m_pEdgesRawPtr[i] = btAlignedAlloc(sizeof(Edge) * maxHandles * 2, 16); + m_pEdges[i] = new (m_pEdgesRawPtr[i]) Edge[maxHandles * 2]; } } //removed overlap management // make boundary sentinels - + m_pHandles[0].m_clientObject = 0; for (int axis = 0; axis < 3; axis++) @@ -425,10 +404,8 @@ m_raycastAccelerator(0) m_pEdges[axis][1].m_handle = 0; #ifdef DEBUG_BROADPHASE debugPrintAxis(axis); -#endif //DEBUG_BROADPHASE - +#endif //DEBUG_BROADPHASE } - } template <typename BP_FP_INT_TYPE> @@ -439,14 +416,14 @@ btAxisSweep3Internal<BP_FP_INT_TYPE>::~btAxisSweep3Internal() m_nullPairCache->~btOverlappingPairCache(); btAlignedFree(m_nullPairCache); m_raycastAccelerator->~btDbvtBroadphase(); - btAlignedFree (m_raycastAccelerator); + btAlignedFree(m_raycastAccelerator); } for (int i = 2; i >= 0; i--) { btAlignedFree(m_pEdgesRawPtr[i]); } - delete [] m_pHandles; + delete[] m_pHandles; if (m_ownsPairCache) { @@ -470,13 +447,12 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::quantize(BP_FP_INT_TYPE* out, const b out[2] = (BP_FP_INT_TYPE)(((BP_FP_INT_TYPE)v.getZ() & m_bpHandleMask) | isMax); #else btVector3 v = (point - m_worldAabbMin) * m_quantize; - out[0]=(v[0]<=0)?(BP_FP_INT_TYPE)isMax:(v[0]>=m_handleSentinel)?(BP_FP_INT_TYPE)((m_handleSentinel&m_bpHandleMask)|isMax):(BP_FP_INT_TYPE)(((BP_FP_INT_TYPE)v[0]&m_bpHandleMask)|isMax); - 
out[1]=(v[1]<=0)?(BP_FP_INT_TYPE)isMax:(v[1]>=m_handleSentinel)?(BP_FP_INT_TYPE)((m_handleSentinel&m_bpHandleMask)|isMax):(BP_FP_INT_TYPE)(((BP_FP_INT_TYPE)v[1]&m_bpHandleMask)|isMax); - out[2]=(v[2]<=0)?(BP_FP_INT_TYPE)isMax:(v[2]>=m_handleSentinel)?(BP_FP_INT_TYPE)((m_handleSentinel&m_bpHandleMask)|isMax):(BP_FP_INT_TYPE)(((BP_FP_INT_TYPE)v[2]&m_bpHandleMask)|isMax); -#endif //OLD_CLAMPING_METHOD + out[0] = (v[0] <= 0) ? (BP_FP_INT_TYPE)isMax : (v[0] >= m_handleSentinel) ? (BP_FP_INT_TYPE)((m_handleSentinel & m_bpHandleMask) | isMax) : (BP_FP_INT_TYPE)(((BP_FP_INT_TYPE)v[0] & m_bpHandleMask) | isMax); + out[1] = (v[1] <= 0) ? (BP_FP_INT_TYPE)isMax : (v[1] >= m_handleSentinel) ? (BP_FP_INT_TYPE)((m_handleSentinel & m_bpHandleMask) | isMax) : (BP_FP_INT_TYPE)(((BP_FP_INT_TYPE)v[1] & m_bpHandleMask) | isMax); + out[2] = (v[2] <= 0) ? (BP_FP_INT_TYPE)isMax : (v[2] >= m_handleSentinel) ? (BP_FP_INT_TYPE)((m_handleSentinel & m_bpHandleMask) | isMax) : (BP_FP_INT_TYPE)(((BP_FP_INT_TYPE)v[2] & m_bpHandleMask) | isMax); +#endif //OLD_CLAMPING_METHOD } - template <typename BP_FP_INT_TYPE> BP_FP_INT_TYPE btAxisSweep3Internal<BP_FP_INT_TYPE>::allocHandle() { @@ -500,9 +476,8 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::freeHandle(BP_FP_INT_TYPE handle) m_numHandles--; } - template <typename BP_FP_INT_TYPE> -BP_FP_INT_TYPE btAxisSweep3Internal<BP_FP_INT_TYPE>::addHandle(const btVector3& aabbMin,const btVector3& aabbMax, void* pOwner, int collisionFilterGroup, int collisionFilterMask,btDispatcher* dispatcher) +BP_FP_INT_TYPE btAxisSweep3Internal<BP_FP_INT_TYPE>::addHandle(const btVector3& aabbMin, const btVector3& aabbMax, void* pOwner, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher) { // quantize the bounds BP_FP_INT_TYPE min[3], max[3]; @@ -511,10 +486,9 @@ BP_FP_INT_TYPE btAxisSweep3Internal<BP_FP_INT_TYPE>::addHandle(const btVector3& // allocate a handle BP_FP_INT_TYPE handle = allocHandle(); - Handle* pHandle = getHandle(handle); - + pHandle->m_uniqueId = static_cast<int>(handle); //pHandle->m_pOverlaps = 0; pHandle->m_clientObject = pOwner; @@ -524,11 +498,9 @@ BP_FP_INT_TYPE btAxisSweep3Internal<BP_FP_INT_TYPE>::addHandle(const btVector3& // compute current limit of edge arrays BP_FP_INT_TYPE limit = static_cast<BP_FP_INT_TYPE>(m_numHandles * 2); - // insert new edges just inside the max boundary edge for (BP_FP_INT_TYPE axis = 0; axis < 3; axis++) { - m_pHandles[0].m_maxEdges[axis] += 2; m_pEdges[axis][limit + 1] = m_pEdges[axis][limit - 1]; @@ -544,22 +516,19 @@ BP_FP_INT_TYPE btAxisSweep3Internal<BP_FP_INT_TYPE>::addHandle(const btVector3& } // now sort the new edges to their correct position - sortMinDown(0, pHandle->m_minEdges[0], dispatcher,false); - sortMaxDown(0, pHandle->m_maxEdges[0], dispatcher,false); - sortMinDown(1, pHandle->m_minEdges[1], dispatcher,false); - sortMaxDown(1, pHandle->m_maxEdges[1], dispatcher,false); - sortMinDown(2, pHandle->m_minEdges[2], dispatcher,true); - sortMaxDown(2, pHandle->m_maxEdges[2], dispatcher,true); - + sortMinDown(0, pHandle->m_minEdges[0], dispatcher, false); + sortMaxDown(0, pHandle->m_maxEdges[0], dispatcher, false); + sortMinDown(1, pHandle->m_minEdges[1], dispatcher, false); + sortMaxDown(1, pHandle->m_maxEdges[1], dispatcher, false); + sortMinDown(2, pHandle->m_minEdges[2], dispatcher, true); + sortMaxDown(2, pHandle->m_maxEdges[2], dispatcher, true); return handle; } - template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::removeHandle(BP_FP_INT_TYPE handle,btDispatcher* dispatcher) 
+void btAxisSweep3Internal<BP_FP_INT_TYPE>::removeHandle(BP_FP_INT_TYPE handle, btDispatcher* dispatcher) { - Handle* pHandle = getHandle(handle); //explicitly remove the pairs containing the proxy @@ -567,50 +536,43 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::removeHandle(BP_FP_INT_TYPE handle,bt ///@todo: compare performance if (!m_pairCache->hasDeferredRemoval()) { - m_pairCache->removeOverlappingPairsContainingProxy(pHandle,dispatcher); + m_pairCache->removeOverlappingPairsContainingProxy(pHandle, dispatcher); } // compute current limit of edge arrays int limit = static_cast<int>(m_numHandles * 2); - + int axis; - for (axis = 0;axis<3;axis++) + for (axis = 0; axis < 3; axis++) { m_pHandles[0].m_maxEdges[axis] -= 2; } // remove the edges by sorting them up to the end of the list - for ( axis = 0; axis < 3; axis++) + for (axis = 0; axis < 3; axis++) { Edge* pEdges = m_pEdges[axis]; BP_FP_INT_TYPE max = pHandle->m_maxEdges[axis]; pEdges[max].m_pos = m_handleSentinel; - sortMaxUp(axis,max,dispatcher,false); - + sortMaxUp(axis, max, dispatcher, false); BP_FP_INT_TYPE i = pHandle->m_minEdges[axis]; pEdges[i].m_pos = m_handleSentinel; + sortMinUp(axis, i, dispatcher, false); - sortMinUp(axis,i,dispatcher,false); + pEdges[limit - 1].m_handle = 0; + pEdges[limit - 1].m_pos = m_handleSentinel; - pEdges[limit-1].m_handle = 0; - pEdges[limit-1].m_pos = m_handleSentinel; - #ifdef DEBUG_BROADPHASE - debugPrintAxis(axis,false); -#endif //DEBUG_BROADPHASE - - + debugPrintAxis(axis, false); +#endif //DEBUG_BROADPHASE } - // free the handle freeHandle(handle); - - } template <typename BP_FP_INT_TYPE> @@ -625,19 +587,16 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::resetPool(btDispatcher* /*dispatcher* m_pHandles[m_maxHandles - 1].SetNextFree(0); } } -} - +} //#include <stdio.h> template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::calculateOverlappingPairs(btDispatcher* dispatcher) +void btAxisSweep3Internal<BP_FP_INT_TYPE>::calculateOverlappingPairs(btDispatcher* dispatcher) { - if (m_pairCache->hasDeferredRemoval()) { - - btBroadphasePairArray& overlappingPairArray = m_pairCache->getOverlappingPairArray(); + btBroadphasePairArray& overlappingPairArray = m_pairCache->getOverlappingPairArray(); //perform a sort, to find duplicates and to sort 'invalid' pairs to the end overlappingPairArray.quickSort(btBroadphasePairSortPredicate()); @@ -645,18 +604,15 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::calculateOverlappingPairs(btDispatche overlappingPairArray.resize(overlappingPairArray.size() - m_invalidPair); m_invalidPair = 0; - int i; btBroadphasePair previousPair; previousPair.m_pProxy0 = 0; previousPair.m_pProxy1 = 0; previousPair.m_algorithm = 0; - - - for (i=0;i<overlappingPairArray.size();i++) + + for (i = 0; i < overlappingPairArray.size(); i++) { - btBroadphasePair& pair = overlappingPairArray[i]; bool isDuplicate = (pair == previousPair); @@ -668,91 +624,90 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::calculateOverlappingPairs(btDispatche if (!isDuplicate) { ///important to use an AABB test that is consistent with the broadphase - bool hasOverlap = testAabbOverlap(pair.m_pProxy0,pair.m_pProxy1); + bool hasOverlap = testAabbOverlap(pair.m_pProxy0, pair.m_pProxy1); if (hasOverlap) { - needsRemoval = false;//callback->processOverlap(pair); - } else + needsRemoval = false; //callback->processOverlap(pair); + } + else { needsRemoval = true; } - } else + } + else { //remove duplicate needsRemoval = true; //should have no algorithm btAssert(!pair.m_algorithm); } - + if 
(needsRemoval) { - m_pairCache->cleanOverlappingPair(pair,dispatcher); + m_pairCache->cleanOverlappingPair(pair, dispatcher); - // m_overlappingPairArray.swap(i,m_overlappingPairArray.size()-1); - // m_overlappingPairArray.pop_back(); + // m_overlappingPairArray.swap(i,m_overlappingPairArray.size()-1); + // m_overlappingPairArray.pop_back(); pair.m_pProxy0 = 0; pair.m_pProxy1 = 0; m_invalidPair++; - } - - } + } + } - ///if you don't like to skip the invalid pairs in the array, execute following code: - #define CLEAN_INVALID_PAIRS 1 - #ifdef CLEAN_INVALID_PAIRS +///if you don't like to skip the invalid pairs in the array, execute following code: +#define CLEAN_INVALID_PAIRS 1 +#ifdef CLEAN_INVALID_PAIRS //perform a sort, to sort 'invalid' pairs to the end overlappingPairArray.quickSort(btBroadphasePairSortPredicate()); overlappingPairArray.resize(overlappingPairArray.size() - m_invalidPair); m_invalidPair = 0; - #endif//CLEAN_INVALID_PAIRS - +#endif //CLEAN_INVALID_PAIRS + //printf("overlappingPairArray.size()=%d\n",overlappingPairArray.size()); } - } - template <typename BP_FP_INT_TYPE> -bool btAxisSweep3Internal<BP_FP_INT_TYPE>::testAabbOverlap(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) +bool btAxisSweep3Internal<BP_FP_INT_TYPE>::testAabbOverlap(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) { const Handle* pHandleA = static_cast<Handle*>(proxy0); const Handle* pHandleB = static_cast<Handle*>(proxy1); - + //optimization 1: check the array index (memory address), instead of the m_pos for (int axis = 0; axis < 3; axis++) - { - if (pHandleA->m_maxEdges[axis] < pHandleB->m_minEdges[axis] || - pHandleB->m_maxEdges[axis] < pHandleA->m_minEdges[axis]) - { - return false; - } - } + { + if (pHandleA->m_maxEdges[axis] < pHandleB->m_minEdges[axis] || + pHandleB->m_maxEdges[axis] < pHandleA->m_minEdges[axis]) + { + return false; + } + } return true; } template <typename BP_FP_INT_TYPE> -bool btAxisSweep3Internal<BP_FP_INT_TYPE>::testOverlap2D(const Handle* pHandleA, const Handle* pHandleB,int axis0,int axis1) +bool btAxisSweep3Internal<BP_FP_INT_TYPE>::testOverlap2D(const Handle* pHandleA, const Handle* pHandleB, int axis0, int axis1) { //optimization 1: check the array index (memory address), instead of the m_pos - if (pHandleA->m_maxEdges[axis0] < pHandleB->m_minEdges[axis0] || + if (pHandleA->m_maxEdges[axis0] < pHandleB->m_minEdges[axis0] || pHandleB->m_maxEdges[axis0] < pHandleA->m_minEdges[axis0] || pHandleA->m_maxEdges[axis1] < pHandleB->m_minEdges[axis1] || - pHandleB->m_maxEdges[axis1] < pHandleA->m_minEdges[axis1]) - { - return false; - } + pHandleB->m_maxEdges[axis1] < pHandleA->m_minEdges[axis1]) + { + return false; + } return true; } template <typename BP_FP_INT_TYPE> -void btAxisSweep3Internal<BP_FP_INT_TYPE>::updateHandle(BP_FP_INT_TYPE handle, const btVector3& aabbMin,const btVector3& aabbMax,btDispatcher* dispatcher) +void btAxisSweep3Internal<BP_FP_INT_TYPE>::updateHandle(BP_FP_INT_TYPE handle, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* dispatcher) { -// btAssert(bounds.IsFinite()); + // btAssert(bounds.IsFinite()); //btAssert(bounds.HasVolume()); Handle* pHandle = getHandle(handle); @@ -776,34 +731,28 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::updateHandle(BP_FP_INT_TYPE handle, c // expand (only adds overlaps) if (dmin < 0) - sortMinDown(axis, emin,dispatcher,true); + sortMinDown(axis, emin, dispatcher, true); if (dmax > 0) - sortMaxUp(axis, emax,dispatcher,true); + sortMaxUp(axis, emax, dispatcher, true); // shrink (only removes 
overlaps) if (dmin > 0) - sortMinUp(axis, emin,dispatcher,true); + sortMinUp(axis, emin, dispatcher, true); if (dmax < 0) - sortMaxDown(axis, emax,dispatcher,true); + sortMaxDown(axis, emax, dispatcher, true); #ifdef DEBUG_BROADPHASE - debugPrintAxis(axis); -#endif //DEBUG_BROADPHASE + debugPrintAxis(axis); +#endif //DEBUG_BROADPHASE } - - } - - - // sorting a min edge downwards can only ever *add* overlaps template <typename BP_FP_INT_TYPE> void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMinDown(int axis, BP_FP_INT_TYPE edge, btDispatcher* /* dispatcher */, bool updateOverlaps) { - Edge* pEdge = m_pEdges[axis] + edge; Edge* pPrev = pEdge - 1; Handle* pHandleEdge = getHandle(pEdge->m_handle); @@ -815,16 +764,15 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMinDown(int axis, BP_FP_INT_TYPE if (pPrev->IsMax()) { // if previous edge is a maximum check the bounds and add an overlap if necessary - const int axis1 = (1 << axis) & 3; - const int axis2 = (1 << axis1) & 3; - if (updateOverlaps && testOverlap2D(pHandleEdge, pHandlePrev,axis1,axis2)) + const int axis1 = (1 << axis) & 3; + const int axis2 = (1 << axis1) & 3; + if (updateOverlaps && testOverlap2D(pHandleEdge, pHandlePrev, axis1, axis2)) { - m_pairCache->addOverlappingPair(pHandleEdge,pHandlePrev); + m_pairCache->addOverlappingPair(pHandleEdge, pHandlePrev); if (m_userPairCallback) - m_userPairCallback->addOverlappingPair(pHandleEdge,pHandlePrev); + m_userPairCallback->addOverlappingPair(pHandleEdge, pHandlePrev); //AddOverlap(pEdge->m_handle, pPrev->m_handle); - } // update edge reference in other handle @@ -847,8 +795,7 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMinDown(int axis, BP_FP_INT_TYPE #ifdef DEBUG_BROADPHASE debugPrintAxis(axis); -#endif //DEBUG_BROADPHASE - +#endif //DEBUG_BROADPHASE } // sorting a min edge upwards can only ever *remove* overlaps @@ -867,25 +814,21 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMinUp(int axis, BP_FP_INT_TYPE ed { Handle* handle0 = getHandle(pEdge->m_handle); Handle* handle1 = getHandle(pNext->m_handle); - const int axis1 = (1 << axis) & 3; - const int axis2 = (1 << axis1) & 3; - + const int axis1 = (1 << axis) & 3; + const int axis2 = (1 << axis1) & 3; + // if next edge is maximum remove any overlap between the two handles - if (updateOverlaps + if (updateOverlaps #ifdef USE_OVERLAP_TEST_ON_REMOVES - && testOverlap2D(handle0,handle1,axis1,axis2) -#endif //USE_OVERLAP_TEST_ON_REMOVES - ) + && testOverlap2D(handle0, handle1, axis1, axis2) +#endif //USE_OVERLAP_TEST_ON_REMOVES + ) { - - - m_pairCache->removeOverlappingPair(handle0,handle1,dispatcher); + m_pairCache->removeOverlappingPair(handle0, handle1, dispatcher); if (m_userPairCallback) - m_userPairCallback->removeOverlappingPair(handle0,handle1,dispatcher); - + m_userPairCallback->removeOverlappingPair(handle0, handle1, dispatcher); } - // update edge reference in other handle pHandleNext->m_maxEdges[axis]--; } @@ -903,15 +846,12 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMinUp(int axis, BP_FP_INT_TYPE ed pEdge++; pNext++; } - - } // sorting a max edge downwards can only ever *remove* overlaps template <typename BP_FP_INT_TYPE> void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMaxDown(int axis, BP_FP_INT_TYPE edge, btDispatcher* dispatcher, bool updateOverlaps) { - Edge* pEdge = m_pEdges[axis] + edge; Edge* pPrev = pEdge - 1; Handle* pHandleEdge = getHandle(pEdge->m_handle); @@ -925,28 +865,25 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMaxDown(int axis, BP_FP_INT_TYPE // if previous edge was a minimum remove any 
overlap between the two handles Handle* handle0 = getHandle(pEdge->m_handle); Handle* handle1 = getHandle(pPrev->m_handle); - const int axis1 = (1 << axis) & 3; - const int axis2 = (1 << axis1) & 3; + const int axis1 = (1 << axis) & 3; + const int axis2 = (1 << axis1) & 3; - if (updateOverlaps + if (updateOverlaps #ifdef USE_OVERLAP_TEST_ON_REMOVES - && testOverlap2D(handle0,handle1,axis1,axis2) -#endif //USE_OVERLAP_TEST_ON_REMOVES - ) + && testOverlap2D(handle0, handle1, axis1, axis2) +#endif //USE_OVERLAP_TEST_ON_REMOVES + ) { //this is done during the overlappingpairarray iteration/narrowphase collision - - m_pairCache->removeOverlappingPair(handle0,handle1,dispatcher); + m_pairCache->removeOverlappingPair(handle0, handle1, dispatcher); if (m_userPairCallback) - m_userPairCallback->removeOverlappingPair(handle0,handle1,dispatcher); - - - + m_userPairCallback->removeOverlappingPair(handle0, handle1, dispatcher); } // update edge reference in other handle - pHandlePrev->m_minEdges[axis]++;; + pHandlePrev->m_minEdges[axis]++; + ; } else pHandlePrev->m_maxEdges[axis]++; @@ -963,11 +900,9 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMaxDown(int axis, BP_FP_INT_TYPE pPrev--; } - #ifdef DEBUG_BROADPHASE debugPrintAxis(axis); -#endif //DEBUG_BROADPHASE - +#endif //DEBUG_BROADPHASE } // sorting a max edge upwards can only ever *add* overlaps @@ -982,19 +917,19 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMaxUp(int axis, BP_FP_INT_TYPE ed { Handle* pHandleNext = getHandle(pNext->m_handle); - const int axis1 = (1 << axis) & 3; - const int axis2 = (1 << axis1) & 3; + const int axis1 = (1 << axis) & 3; + const int axis2 = (1 << axis1) & 3; if (!pNext->IsMax()) { // if next edge is a minimum check the bounds and add an overlap if necessary - if (updateOverlaps && testOverlap2D(pHandleEdge, pHandleNext,axis1,axis2)) + if (updateOverlaps && testOverlap2D(pHandleEdge, pHandleNext, axis1, axis2)) { Handle* handle0 = getHandle(pEdge->m_handle); Handle* handle1 = getHandle(pNext->m_handle); - m_pairCache->addOverlappingPair(handle0,handle1); + m_pairCache->addOverlappingPair(handle0, handle1); if (m_userPairCallback) - m_userPairCallback->addOverlappingPair(handle0,handle1); + m_userPairCallback->addOverlappingPair(handle0, handle1); } // update edge reference in other handle @@ -1014,7 +949,6 @@ void btAxisSweep3Internal<BP_FP_INT_TYPE>::sortMaxUp(int axis, BP_FP_INT_TYPE ed pEdge++; pNext++; } - } #endif diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseInterface.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseInterface.h index fb68e0024e..b097eca5f5 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseInterface.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseInterface.h @@ -13,10 +13,8 @@ subject to the following restrictions: 3. This notice may not be removed or altered from any source distribution. 
*/ -#ifndef BT_BROADPHASE_INTERFACE_H -#define BT_BROADPHASE_INTERFACE_H - - +#ifndef BT_BROADPHASE_INTERFACE_H +#define BT_BROADPHASE_INTERFACE_H struct btDispatcherInfo; class btDispatcher; @@ -24,27 +22,23 @@ class btDispatcher; class btOverlappingPairCache; - - -struct btBroadphaseAabbCallback +struct btBroadphaseAabbCallback { virtual ~btBroadphaseAabbCallback() {} - virtual bool process(const btBroadphaseProxy* proxy) = 0; + virtual bool process(const btBroadphaseProxy* proxy) = 0; }; - -struct btBroadphaseRayCallback : public btBroadphaseAabbCallback +struct btBroadphaseRayCallback : public btBroadphaseAabbCallback { ///added some cached data to accelerate ray-AABB tests - btVector3 m_rayDirectionInverse; - unsigned int m_signs[3]; - btScalar m_lambda_max; + btVector3 m_rayDirectionInverse; + unsigned int m_signs[3]; + btScalar m_lambda_max; virtual ~btBroadphaseRayCallback() {} - + protected: - - btBroadphaseRayCallback() {} + btBroadphaseRayCallback() {} }; #include "LinearMath/btVector3.h" @@ -57,30 +51,29 @@ class btBroadphaseInterface public: virtual ~btBroadphaseInterface() {} - virtual btBroadphaseProxy* createProxy( const btVector3& aabbMin, const btVector3& aabbMax,int shapeType,void* userPtr, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher) =0; - virtual void destroyProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher)=0; - virtual void setAabb(btBroadphaseProxy* proxy,const btVector3& aabbMin,const btVector3& aabbMax, btDispatcher* dispatcher)=0; - virtual void getAabb(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const =0; + virtual btBroadphaseProxy* createProxy(const btVector3& aabbMin, const btVector3& aabbMax, int shapeType, void* userPtr, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher) = 0; + virtual void destroyProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher) = 0; + virtual void setAabb(btBroadphaseProxy* proxy, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* dispatcher) = 0; + virtual void getAabb(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const = 0; - virtual void rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin=btVector3(0,0,0), const btVector3& aabbMax = btVector3(0,0,0)) = 0; + virtual void rayTest(const btVector3& rayFrom, const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin = btVector3(0, 0, 0), const btVector3& aabbMax = btVector3(0, 0, 0)) = 0; - virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback) = 0; + virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback) = 0; ///calculateOverlappingPairs is optional: incremental algorithms (sweep and prune) might do it during the set aabb - virtual void calculateOverlappingPairs(btDispatcher* dispatcher)=0; + virtual void calculateOverlappingPairs(btDispatcher* dispatcher) = 0; - virtual btOverlappingPairCache* getOverlappingPairCache()=0; - virtual const btOverlappingPairCache* getOverlappingPairCache() const =0; + virtual btOverlappingPairCache* getOverlappingPairCache() = 0; + virtual const btOverlappingPairCache* getOverlappingPairCache() const = 0; ///getAabb returns the axis aligned bounding box in the 'global' coordinate frame ///will add some transform later - virtual void getBroadphaseAabb(btVector3& aabbMin,btVector3& aabbMax) const =0; + virtual void 
getBroadphaseAabb(btVector3& aabbMin, btVector3& aabbMax) const = 0; ///reset broadphase internal structures, to ensure determinism/reproducability - virtual void resetPool(btDispatcher* dispatcher) { (void) dispatcher; }; - - virtual void printStats() = 0; + virtual void resetPool(btDispatcher* dispatcher) { (void)dispatcher; }; + virtual void printStats() = 0; }; -#endif //BT_BROADPHASE_INTERFACE_H +#endif //BT_BROADPHASE_INTERFACE_H diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.cpp index 0fd4ef46be..7ee065aac3 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.cpp @@ -15,4 +15,4 @@ subject to the following restrictions: #include "btBroadphaseProxy.h" -BT_NOT_EMPTY_FILE // fix warning LNK4221: This object file does not define any previously undefined public symbols, so it will not be used by any link operation that consumes this library +BT_NOT_EMPTY_FILE // fix warning LNK4221: This object file does not define any previously undefined public symbols, so it will not be used by any link operation that consumes this library diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.h index f6e1202a69..825caeef56 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btBroadphaseProxy.h @@ -16,11 +16,10 @@ subject to the following restrictions: #ifndef BT_BROADPHASE_PROXY_H #define BT_BROADPHASE_PROXY_H -#include "LinearMath/btScalar.h" //for SIMD_FORCE_INLINE +#include "LinearMath/btScalar.h" //for SIMD_FORCE_INLINE #include "LinearMath/btVector3.h" #include "LinearMath/btAlignedAllocator.h" - /// btDispatcher uses these types /// IMPORTANT NOTE:The types are ordered polyhedral, implicit convex and concave /// to facilitate type checking @@ -35,8 +34,8 @@ enum BroadphaseNativeTypes CONVEX_HULL_SHAPE_PROXYTYPE, CONVEX_POINT_CLOUD_SHAPE_PROXYTYPE, CUSTOM_POLYHEDRAL_SHAPE_TYPE, -//implicit convex shapes -IMPLICIT_CONVEX_SHAPES_START_HERE, + //implicit convex shapes + IMPLICIT_CONVEX_SHAPES_START_HERE, SPHERE_SHAPE_PROXYTYPE, MULTI_SPHERE_SHAPE_PROXYTYPE, CAPSULE_SHAPE_PROXYTYPE, @@ -49,8 +48,8 @@ IMPLICIT_CONVEX_SHAPES_START_HERE, BOX_2D_SHAPE_PROXYTYPE, CONVEX_2D_SHAPE_PROXYTYPE, CUSTOM_CONVEX_SHAPE_TYPE, -//concave shapes -CONCAVE_SHAPES_START_HERE, + //concave shapes + CONCAVE_SHAPES_START_HERE, //keep all the convex shapetype below here, for the check IsConvexShape in broadphase proxy! 
TRIANGLE_MESH_SHAPE_PROXYTYPE, SCALED_TRIANGLE_MESH_SHAPE_PROXYTYPE, @@ -58,16 +57,16 @@ CONCAVE_SHAPES_START_HERE, FAST_CONCAVE_MESH_PROXYTYPE, //terrain TERRAIN_SHAPE_PROXYTYPE, -///Used for GIMPACT Trimesh integration + ///Used for GIMPACT Trimesh integration GIMPACT_SHAPE_PROXYTYPE, -///Multimaterial mesh - MULTIMATERIAL_TRIANGLE_MESH_PROXYTYPE, - + ///Multimaterial mesh + MULTIMATERIAL_TRIANGLE_MESH_PROXYTYPE, + EMPTY_SHAPE_PROXYTYPE, STATIC_PLANE_PROXYTYPE, CUSTOM_CONCAVE_SHAPE_TYPE, - SDF_SHAPE_PROXYTYPE=CUSTOM_CONCAVE_SHAPE_TYPE, -CONCAVE_SHAPES_END_HERE, + SDF_SHAPE_PROXYTYPE = CUSTOM_CONCAVE_SHAPE_TYPE, + CONCAVE_SHAPES_END_HERE, COMPOUND_SHAPE_PROXYTYPE, @@ -77,38 +76,37 @@ CONCAVE_SHAPES_END_HERE, INVALID_SHAPE_PROXYTYPE, MAX_BROADPHASE_COLLISION_TYPES - -}; +}; -///The btBroadphaseProxy is the main class that can be used with the Bullet broadphases. +///The btBroadphaseProxy is the main class that can be used with the Bullet broadphases. ///It stores collision shape type information, collision filter information and a client object, typically a btCollisionObject or btRigidBody. -ATTRIBUTE_ALIGNED16(struct) btBroadphaseProxy +ATTRIBUTE_ALIGNED16(struct) +btBroadphaseProxy { + BT_DECLARE_ALIGNED_ALLOCATOR(); -BT_DECLARE_ALIGNED_ALLOCATOR(); - ///optional filtering to cull potential collisions enum CollisionFilterGroups { - DefaultFilter = 1, - StaticFilter = 2, - KinematicFilter = 4, - DebrisFilter = 8, - SensorTrigger = 16, - CharacterFilter = 32, - AllFilter = -1 //all bits sets: DefaultFilter | StaticFilter | KinematicFilter | DebrisFilter | SensorTrigger + DefaultFilter = 1, + StaticFilter = 2, + KinematicFilter = 4, + DebrisFilter = 8, + SensorTrigger = 16, + CharacterFilter = 32, + AllFilter = -1 //all bits sets: DefaultFilter | StaticFilter | KinematicFilter | DebrisFilter | SensorTrigger }; //Usually the client btCollisionObject or Rigidbody class - void* m_clientObject; - int m_collisionFilterGroup; - int m_collisionFilterMask; + void* m_clientObject; + int m_collisionFilterGroup; + int m_collisionFilterMask; - int m_uniqueId;//m_uniqueId is introduced for paircache. could get rid of this, by calculating the address offset etc. + int m_uniqueId; //m_uniqueId is introduced for paircache. could get rid of this, by calculating the address offset etc. 
- btVector3 m_aabbMin; - btVector3 m_aabbMax; + btVector3 m_aabbMin; + btVector3 m_aabbMax; SIMD_FORCE_INLINE int getUid() const { @@ -116,47 +114,45 @@ BT_DECLARE_ALIGNED_ALLOCATOR(); } //used for memory pools - btBroadphaseProxy() :m_clientObject(0) + btBroadphaseProxy() : m_clientObject(0) { } - btBroadphaseProxy(const btVector3& aabbMin,const btVector3& aabbMax,void* userPtr, int collisionFilterGroup, int collisionFilterMask) - :m_clientObject(userPtr), - m_collisionFilterGroup(collisionFilterGroup), - m_collisionFilterMask(collisionFilterMask), - m_aabbMin(aabbMin), - m_aabbMax(aabbMax) + btBroadphaseProxy(const btVector3& aabbMin, const btVector3& aabbMax, void* userPtr, int collisionFilterGroup, int collisionFilterMask) + : m_clientObject(userPtr), + m_collisionFilterGroup(collisionFilterGroup), + m_collisionFilterMask(collisionFilterMask), + m_aabbMin(aabbMin), + m_aabbMax(aabbMax) { } - - static SIMD_FORCE_INLINE bool isPolyhedral(int proxyType) { - return (proxyType < IMPLICIT_CONVEX_SHAPES_START_HERE); + return (proxyType < IMPLICIT_CONVEX_SHAPES_START_HERE); } - static SIMD_FORCE_INLINE bool isConvex(int proxyType) + static SIMD_FORCE_INLINE bool isConvex(int proxyType) { return (proxyType < CONCAVE_SHAPES_START_HERE); } - static SIMD_FORCE_INLINE bool isNonMoving(int proxyType) + static SIMD_FORCE_INLINE bool isNonMoving(int proxyType) { - return (isConcave(proxyType) && !(proxyType==GIMPACT_SHAPE_PROXYTYPE)); + return (isConcave(proxyType) && !(proxyType == GIMPACT_SHAPE_PROXYTYPE)); } - static SIMD_FORCE_INLINE bool isConcave(int proxyType) + static SIMD_FORCE_INLINE bool isConcave(int proxyType) { return ((proxyType > CONCAVE_SHAPES_START_HERE) && - (proxyType < CONCAVE_SHAPES_END_HERE)); + (proxyType < CONCAVE_SHAPES_END_HERE)); } - static SIMD_FORCE_INLINE bool isCompound(int proxyType) + static SIMD_FORCE_INLINE bool isCompound(int proxyType) { return (proxyType == COMPOUND_SHAPE_PROXYTYPE); } - static SIMD_FORCE_INLINE bool isSoftBody(int proxyType) + static SIMD_FORCE_INLINE bool isSoftBody(int proxyType) { return (proxyType == SOFTBODY_SHAPE_PROXYTYPE); } @@ -168,67 +164,62 @@ BT_DECLARE_ALIGNED_ALLOCATOR(); static SIMD_FORCE_INLINE bool isConvex2d(int proxyType) { - return (proxyType == BOX_2D_SHAPE_PROXYTYPE) || (proxyType == CONVEX_2D_SHAPE_PROXYTYPE); + return (proxyType == BOX_2D_SHAPE_PROXYTYPE) || (proxyType == CONVEX_2D_SHAPE_PROXYTYPE); } - - -} -; +}; class btCollisionAlgorithm; struct btBroadphaseProxy; - - ///The btBroadphasePair class contains a pair of aabb-overlapping objects. ///A btDispatcher can search a btCollisionAlgorithm that performs exact/narrowphase collision detection on the actual collision shapes. 
-ATTRIBUTE_ALIGNED16(struct) btBroadphasePair +ATTRIBUTE_ALIGNED16(struct) +btBroadphasePair { - btBroadphasePair () - : - m_pProxy0(0), - m_pProxy1(0), - m_algorithm(0), - m_internalInfo1(0) + btBroadphasePair() + : m_pProxy0(0), + m_pProxy1(0), + m_algorithm(0), + m_internalInfo1(0) { } -BT_DECLARE_ALIGNED_ALLOCATOR(); + BT_DECLARE_ALIGNED_ALLOCATOR(); btBroadphasePair(const btBroadphasePair& other) - : m_pProxy0(other.m_pProxy0), - m_pProxy1(other.m_pProxy1), - m_algorithm(other.m_algorithm), - m_internalInfo1(other.m_internalInfo1) + : m_pProxy0(other.m_pProxy0), + m_pProxy1(other.m_pProxy1), + m_algorithm(other.m_algorithm), + m_internalInfo1(other.m_internalInfo1) { } - btBroadphasePair(btBroadphaseProxy& proxy0,btBroadphaseProxy& proxy1) + btBroadphasePair(btBroadphaseProxy & proxy0, btBroadphaseProxy & proxy1) { - //keep them sorted, so the std::set operations work if (proxy0.m_uniqueId < proxy1.m_uniqueId) - { - m_pProxy0 = &proxy0; - m_pProxy1 = &proxy1; - } - else - { - m_pProxy0 = &proxy1; - m_pProxy1 = &proxy0; - } + { + m_pProxy0 = &proxy0; + m_pProxy1 = &proxy1; + } + else + { + m_pProxy0 = &proxy1; + m_pProxy1 = &proxy0; + } m_algorithm = 0; m_internalInfo1 = 0; - } - + btBroadphaseProxy* m_pProxy0; btBroadphaseProxy* m_pProxy1; - - mutable btCollisionAlgorithm* m_algorithm; - union { void* m_internalInfo1; int m_internalTmpValue;};//don't use this data, it will be removed in future version. + mutable btCollisionAlgorithm* m_algorithm; + union { + void* m_internalInfo1; + int m_internalTmpValue; + }; //don't use this data, it will be removed in future version. }; /* @@ -240,31 +231,25 @@ SIMD_FORCE_INLINE bool operator<(const btBroadphasePair& a, const btBroadphasePa } */ - - class btBroadphasePairSortPredicate { - public: - - bool operator() ( const btBroadphasePair& a, const btBroadphasePair& b ) const - { - const int uidA0 = a.m_pProxy0 ? a.m_pProxy0->m_uniqueId : -1; - const int uidB0 = b.m_pProxy0 ? b.m_pProxy0->m_uniqueId : -1; - const int uidA1 = a.m_pProxy1 ? a.m_pProxy1->m_uniqueId : -1; - const int uidB1 = b.m_pProxy1 ? b.m_pProxy1->m_uniqueId : -1; - - return uidA0 > uidB0 || - (a.m_pProxy0 == b.m_pProxy0 && uidA1 > uidB1) || - (a.m_pProxy0 == b.m_pProxy0 && a.m_pProxy1 == b.m_pProxy1 && a.m_algorithm > b.m_algorithm); - } +public: + bool operator()(const btBroadphasePair& a, const btBroadphasePair& b) const + { + const int uidA0 = a.m_pProxy0 ? a.m_pProxy0->m_uniqueId : -1; + const int uidB0 = b.m_pProxy0 ? b.m_pProxy0->m_uniqueId : -1; + const int uidA1 = a.m_pProxy1 ? a.m_pProxy1->m_uniqueId : -1; + const int uidB1 = b.m_pProxy1 ? 
b.m_pProxy1->m_uniqueId : -1; + + return uidA0 > uidB0 || + (a.m_pProxy0 == b.m_pProxy0 && uidA1 > uidB1) || + (a.m_pProxy0 == b.m_pProxy0 && a.m_pProxy1 == b.m_pProxy1 && a.m_algorithm > b.m_algorithm); + } }; - -SIMD_FORCE_INLINE bool operator==(const btBroadphasePair& a, const btBroadphasePair& b) +SIMD_FORCE_INLINE bool operator==(const btBroadphasePair& a, const btBroadphasePair& b) { - return (a.m_pProxy0 == b.m_pProxy0) && (a.m_pProxy1 == b.m_pProxy1); + return (a.m_pProxy0 == b.m_pProxy0) && (a.m_pProxy1 == b.m_pProxy1); } - -#endif //BT_BROADPHASE_PROXY_H - +#endif //BT_BROADPHASE_PROXY_H diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.cpp index c95d1be0f2..6e36d3bd73 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.cpp @@ -20,4 +20,3 @@ btCollisionAlgorithm::btCollisionAlgorithm(const btCollisionAlgorithmConstructio { m_dispatcher = ci.m_dispatcher1; } - diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.h index 405656236b..b00c0b1b4b 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btCollisionAlgorithm.h @@ -25,57 +25,51 @@ class btManifoldResult; class btCollisionObject; struct btCollisionObjectWrapper; struct btDispatcherInfo; -class btPersistentManifold; +class btPersistentManifold; -typedef btAlignedObjectArray<btPersistentManifold*> btManifoldArray; +typedef btAlignedObjectArray<btPersistentManifold*> btManifoldArray; struct btCollisionAlgorithmConstructionInfo { btCollisionAlgorithmConstructionInfo() - :m_dispatcher1(0), - m_manifold(0) + : m_dispatcher1(0), + m_manifold(0) { } - btCollisionAlgorithmConstructionInfo(btDispatcher* dispatcher,int temp) - :m_dispatcher1(dispatcher) + btCollisionAlgorithmConstructionInfo(btDispatcher* dispatcher, int temp) + : m_dispatcher1(dispatcher) { (void)temp; } - btDispatcher* m_dispatcher1; - btPersistentManifold* m_manifold; - -// int getDispatcherId(); + btDispatcher* m_dispatcher1; + btPersistentManifold* m_manifold; + // int getDispatcherId(); }; - ///btCollisionAlgorithm is an collision interface that is compatible with the Broadphase and btDispatcher. 
///It is persistent over frames class btCollisionAlgorithm { - protected: - - btDispatcher* m_dispatcher; + btDispatcher* m_dispatcher; protected: -// int getDispatcherId(); - -public: + // int getDispatcherId(); - btCollisionAlgorithm() {}; +public: + btCollisionAlgorithm(){}; btCollisionAlgorithm(const btCollisionAlgorithmConstructionInfo& ci); - virtual ~btCollisionAlgorithm() {}; + virtual ~btCollisionAlgorithm(){}; - virtual void processCollision (const btCollisionObjectWrapper* body0Wrap,const btCollisionObjectWrapper* body1Wrap,const btDispatcherInfo& dispatchInfo,btManifoldResult* resultOut) = 0; + virtual void processCollision(const btCollisionObjectWrapper* body0Wrap, const btCollisionObjectWrapper* body1Wrap, const btDispatcherInfo& dispatchInfo, btManifoldResult* resultOut) = 0; - virtual btScalar calculateTimeOfImpact(btCollisionObject* body0,btCollisionObject* body1,const btDispatcherInfo& dispatchInfo,btManifoldResult* resultOut) = 0; + virtual btScalar calculateTimeOfImpact(btCollisionObject* body0, btCollisionObject* body1, const btDispatcherInfo& dispatchInfo, btManifoldResult* resultOut) = 0; - virtual void getAllContactManifolds(btManifoldArray& manifoldArray) = 0; + virtual void getAllContactManifolds(btManifoldArray& manifoldArray) = 0; }; - -#endif //BT_COLLISION_ALGORITHM_H +#endif //BT_COLLISION_ALGORITHM_H diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.cpp index d791d07418..166cb04c0b 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.cpp @@ -17,210 +17,226 @@ subject to the following restrictions: #include "btDbvt.h" // -typedef btAlignedObjectArray<btDbvtNode*> tNodeArray; -typedef btAlignedObjectArray<const btDbvtNode*> tConstNodeArray; +typedef btAlignedObjectArray<btDbvtNode*> tNodeArray; +typedef btAlignedObjectArray<const btDbvtNode*> tConstNodeArray; // struct btDbvtNodeEnumerator : btDbvt::ICollide { - tConstNodeArray nodes; + tConstNodeArray nodes; void Process(const btDbvtNode* n) { nodes.push_back(n); } }; // -static DBVT_INLINE int indexof(const btDbvtNode* node) +static DBVT_INLINE int indexof(const btDbvtNode* node) { - return(node->parent->childs[1]==node); + return (node->parent->childs[1] == node); } // -static DBVT_INLINE btDbvtVolume merge( const btDbvtVolume& a, +static DBVT_INLINE btDbvtVolume merge(const btDbvtVolume& a, const btDbvtVolume& b) { -#if (DBVT_MERGE_IMPL==DBVT_IMPL_SSE) - ATTRIBUTE_ALIGNED16( char locals[sizeof(btDbvtAabbMm)]); - btDbvtVolume* ptr = (btDbvtVolume*) locals; - btDbvtVolume& res=*ptr; +#ifdef BT_USE_SSE + ATTRIBUTE_ALIGNED16(char locals[sizeof(btDbvtAabbMm)]); + btDbvtVolume* ptr = (btDbvtVolume*)locals; + btDbvtVolume& res = *ptr; #else - btDbvtVolume res; + btDbvtVolume res; #endif - Merge(a,b,res); - return(res); + Merge(a, b, res); + return (res); } // volume+edge lengths -static DBVT_INLINE btScalar size(const btDbvtVolume& a) +static DBVT_INLINE btScalar size(const btDbvtVolume& a) { - const btVector3 edges=a.Lengths(); - return( edges.x()*edges.y()*edges.z()+ - edges.x()+edges.y()+edges.z()); + const btVector3 edges = a.Lengths(); + return (edges.x() * edges.y() * edges.z() + + edges.x() + edges.y() + edges.z()); } // -static void getmaxdepth(const btDbvtNode* node,int depth,int& maxdepth) +static void getmaxdepth(const btDbvtNode* node, int depth, int& maxdepth) { - if(node->isinternal()) + if (node->isinternal()) { - 
getmaxdepth(node->childs[0],depth+1,maxdepth); - getmaxdepth(node->childs[1],depth+1,maxdepth); - } else maxdepth=btMax(maxdepth,depth); + getmaxdepth(node->childs[0], depth + 1, maxdepth); + getmaxdepth(node->childs[1], depth + 1, maxdepth); + } + else + maxdepth = btMax(maxdepth, depth); } // -static DBVT_INLINE void deletenode( btDbvt* pdbvt, - btDbvtNode* node) +static DBVT_INLINE void deletenode(btDbvt* pdbvt, + btDbvtNode* node) { btAlignedFree(pdbvt->m_free); - pdbvt->m_free=node; + pdbvt->m_free = node; } // -static void recursedeletenode( btDbvt* pdbvt, - btDbvtNode* node) +static void recursedeletenode(btDbvt* pdbvt, + btDbvtNode* node) { - if(!node->isleaf()) + if (node == 0) return; + if (!node->isleaf()) { - recursedeletenode(pdbvt,node->childs[0]); - recursedeletenode(pdbvt,node->childs[1]); + recursedeletenode(pdbvt, node->childs[0]); + recursedeletenode(pdbvt, node->childs[1]); } - if(node==pdbvt->m_root) pdbvt->m_root=0; - deletenode(pdbvt,node); + if (node == pdbvt->m_root) pdbvt->m_root = 0; + deletenode(pdbvt, node); } // -static DBVT_INLINE btDbvtNode* createnode( btDbvt* pdbvt, - btDbvtNode* parent, - void* data) +static DBVT_INLINE btDbvtNode* createnode(btDbvt* pdbvt, + btDbvtNode* parent, + void* data) { - btDbvtNode* node; - if(pdbvt->m_free) - { node=pdbvt->m_free;pdbvt->m_free=0; } + btDbvtNode* node; + if (pdbvt->m_free) + { + node = pdbvt->m_free; + pdbvt->m_free = 0; + } else - { node=new(btAlignedAlloc(sizeof(btDbvtNode),16)) btDbvtNode(); } - node->parent = parent; - node->data = data; - node->childs[1] = 0; - return(node); + { + node = new (btAlignedAlloc(sizeof(btDbvtNode), 16)) btDbvtNode(); + } + node->parent = parent; + node->data = data; + node->childs[1] = 0; + return (node); } // -static DBVT_INLINE btDbvtNode* createnode( btDbvt* pdbvt, - btDbvtNode* parent, - const btDbvtVolume& volume, - void* data) +static DBVT_INLINE btDbvtNode* createnode(btDbvt* pdbvt, + btDbvtNode* parent, + const btDbvtVolume& volume, + void* data) { - btDbvtNode* node=createnode(pdbvt,parent,data); - node->volume=volume; - return(node); + btDbvtNode* node = createnode(pdbvt, parent, data); + node->volume = volume; + return (node); } // -static DBVT_INLINE btDbvtNode* createnode( btDbvt* pdbvt, - btDbvtNode* parent, - const btDbvtVolume& volume0, - const btDbvtVolume& volume1, - void* data) +static DBVT_INLINE btDbvtNode* createnode(btDbvt* pdbvt, + btDbvtNode* parent, + const btDbvtVolume& volume0, + const btDbvtVolume& volume1, + void* data) { - btDbvtNode* node=createnode(pdbvt,parent,data); - Merge(volume0,volume1,node->volume); - return(node); + btDbvtNode* node = createnode(pdbvt, parent, data); + Merge(volume0, volume1, node->volume); + return (node); } // -static void insertleaf( btDbvt* pdbvt, - btDbvtNode* root, - btDbvtNode* leaf) +static void insertleaf(btDbvt* pdbvt, + btDbvtNode* root, + btDbvtNode* leaf) { - if(!pdbvt->m_root) + if (!pdbvt->m_root) { - pdbvt->m_root = leaf; - leaf->parent = 0; + pdbvt->m_root = leaf; + leaf->parent = 0; } else { - if(!root->isleaf()) + if (!root->isleaf()) { - do { - root=root->childs[Select( leaf->volume, - root->childs[0]->volume, - root->childs[1]->volume)]; - } while(!root->isleaf()); + do + { + root = root->childs[Select(leaf->volume, + root->childs[0]->volume, + root->childs[1]->volume)]; + } while (!root->isleaf()); } - btDbvtNode* prev=root->parent; - btDbvtNode* node=createnode(pdbvt,prev,leaf->volume,root->volume,0); - if(prev) + btDbvtNode* prev = root->parent; + btDbvtNode* node = createnode(pdbvt, prev, 
leaf->volume, root->volume, 0); + if (prev) { - prev->childs[indexof(root)] = node; - node->childs[0] = root;root->parent=node; - node->childs[1] = leaf;leaf->parent=node; - do { - if(!prev->volume.Contain(node->volume)) - Merge(prev->childs[0]->volume,prev->childs[1]->volume,prev->volume); + prev->childs[indexof(root)] = node; + node->childs[0] = root; + root->parent = node; + node->childs[1] = leaf; + leaf->parent = node; + do + { + if (!prev->volume.Contain(node->volume)) + Merge(prev->childs[0]->volume, prev->childs[1]->volume, prev->volume); else break; - node=prev; - } while(0!=(prev=node->parent)); + node = prev; + } while (0 != (prev = node->parent)); } else { - node->childs[0] = root;root->parent=node; - node->childs[1] = leaf;leaf->parent=node; - pdbvt->m_root = node; + node->childs[0] = root; + root->parent = node; + node->childs[1] = leaf; + leaf->parent = node; + pdbvt->m_root = node; } } } // -static btDbvtNode* removeleaf( btDbvt* pdbvt, - btDbvtNode* leaf) +static btDbvtNode* removeleaf(btDbvt* pdbvt, + btDbvtNode* leaf) { - if(leaf==pdbvt->m_root) + if (leaf == pdbvt->m_root) { - pdbvt->m_root=0; - return(0); + pdbvt->m_root = 0; + return (0); } else { - btDbvtNode* parent=leaf->parent; - btDbvtNode* prev=parent->parent; - btDbvtNode* sibling=parent->childs[1-indexof(leaf)]; - if(prev) + btDbvtNode* parent = leaf->parent; + btDbvtNode* prev = parent->parent; + btDbvtNode* sibling = parent->childs[1 - indexof(leaf)]; + if (prev) { - prev->childs[indexof(parent)]=sibling; - sibling->parent=prev; - deletenode(pdbvt,parent); - while(prev) + prev->childs[indexof(parent)] = sibling; + sibling->parent = prev; + deletenode(pdbvt, parent); + while (prev) { - const btDbvtVolume pb=prev->volume; - Merge(prev->childs[0]->volume,prev->childs[1]->volume,prev->volume); - if(NotEqual(pb,prev->volume)) + const btDbvtVolume pb = prev->volume; + Merge(prev->childs[0]->volume, prev->childs[1]->volume, prev->volume); + if (NotEqual(pb, prev->volume)) { - prev=prev->parent; - } else break; + prev = prev->parent; + } + else + break; } - return(prev?prev:pdbvt->m_root); + return (prev ? prev : pdbvt->m_root); } else - { - pdbvt->m_root=sibling; - sibling->parent=0; - deletenode(pdbvt,parent); - return(pdbvt->m_root); - } + { + pdbvt->m_root = sibling; + sibling->parent = 0; + deletenode(pdbvt, parent); + return (pdbvt->m_root); + } } } // -static void fetchleaves(btDbvt* pdbvt, - btDbvtNode* root, - tNodeArray& leaves, - int depth=-1) +static void fetchleaves(btDbvt* pdbvt, + btDbvtNode* root, + tNodeArray& leaves, + int depth = -1) { - if(root->isinternal()&&depth) + if (root->isinternal() && depth) { - fetchleaves(pdbvt,root->childs[0],leaves,depth-1); - fetchleaves(pdbvt,root->childs[1],leaves,depth-1); - deletenode(pdbvt,root); + fetchleaves(pdbvt, root->childs[0], leaves, depth - 1); + fetchleaves(pdbvt, root->childs[1], leaves, depth - 1); + deletenode(pdbvt, root); } else { @@ -229,51 +245,50 @@ static void fetchleaves(btDbvt* pdbvt, } // -static bool leftOfAxis( const btDbvtNode* node, - const btVector3& org, - const btVector3& axis) +static bool leftOfAxis(const btDbvtNode* node, + const btVector3& org, + const btVector3& axis) { return btDot(axis, node->volume.Center() - org) <= 0; } - // Partitions leaves such that leaves[0, n) are on the // left of axis, and leaves[n, count) are on the right // of axis. returns N. 
-static int split( btDbvtNode** leaves, - int count, - const btVector3& org, - const btVector3& axis) +static int split(btDbvtNode** leaves, + int count, + const btVector3& org, + const btVector3& axis) { - int begin=0; - int end=count; - for(;;) + int begin = 0; + int end = count; + for (;;) { - while(begin!=end && leftOfAxis(leaves[begin],org,axis)) + while (begin != end && leftOfAxis(leaves[begin], org, axis)) { ++begin; } - if(begin==end) + if (begin == end) { break; } - while(begin!=end && !leftOfAxis(leaves[end-1],org,axis)) + while (begin != end && !leftOfAxis(leaves[end - 1], org, axis)) { --end; } - if(begin==end) + if (begin == end) { break; } // swap out of place nodes --end; - btDbvtNode* temp=leaves[begin]; - leaves[begin]=leaves[end]; - leaves[end]=temp; + btDbvtNode* temp = leaves[begin]; + leaves[begin] = leaves[end]; + leaves[end] = temp; ++begin; } @@ -281,150 +296,153 @@ static int split( btDbvtNode** leaves, } // -static btDbvtVolume bounds( btDbvtNode** leaves, - int count) +static btDbvtVolume bounds(btDbvtNode** leaves, + int count) { -#if DBVT_MERGE_IMPL==DBVT_IMPL_SSE - ATTRIBUTE_ALIGNED16(char locals[sizeof(btDbvtVolume)]); - btDbvtVolume* ptr = (btDbvtVolume*) locals; - btDbvtVolume& volume=*ptr; - volume=leaves[0]->volume; +#ifdef BT_USE_SSE + ATTRIBUTE_ALIGNED16(char locals[sizeof(btDbvtVolume)]); + btDbvtVolume* ptr = (btDbvtVolume*)locals; + btDbvtVolume& volume = *ptr; + volume = leaves[0]->volume; #else - btDbvtVolume volume=leaves[0]->volume; + btDbvtVolume volume = leaves[0]->volume; #endif - for(int i=1,ni=count;i<ni;++i) + for (int i = 1, ni = count; i < ni; ++i) { - Merge(volume,leaves[i]->volume,volume); + Merge(volume, leaves[i]->volume, volume); } - return(volume); + return (volume); } // -static void bottomup( btDbvt* pdbvt, - btDbvtNode** leaves, - int count) +static void bottomup(btDbvt* pdbvt, + btDbvtNode** leaves, + int count) { - while(count>1) + while (count > 1) { - btScalar minsize=SIMD_INFINITY; - int minidx[2]={-1,-1}; - for(int i=0;i<count;++i) + btScalar minsize = SIMD_INFINITY; + int minidx[2] = {-1, -1}; + for (int i = 0; i < count; ++i) { - for(int j=i+1;j<count;++j) + for (int j = i + 1; j < count; ++j) { - const btScalar sz=size(merge(leaves[i]->volume,leaves[j]->volume)); - if(sz<minsize) + const btScalar sz = size(merge(leaves[i]->volume, leaves[j]->volume)); + if (sz < minsize) { - minsize = sz; - minidx[0] = i; - minidx[1] = j; + minsize = sz; + minidx[0] = i; + minidx[1] = j; } } } - btDbvtNode* n[] = {leaves[minidx[0]],leaves[minidx[1]]}; - btDbvtNode* p = createnode(pdbvt,0,n[0]->volume,n[1]->volume,0); - p->childs[0] = n[0]; - p->childs[1] = n[1]; - n[0]->parent = p; - n[1]->parent = p; - leaves[minidx[0]] = p; - leaves[minidx[1]] = leaves[count-1]; + btDbvtNode* n[] = {leaves[minidx[0]], leaves[minidx[1]]}; + btDbvtNode* p = createnode(pdbvt, 0, n[0]->volume, n[1]->volume, 0); + p->childs[0] = n[0]; + p->childs[1] = n[1]; + n[0]->parent = p; + n[1]->parent = p; + leaves[minidx[0]] = p; + leaves[minidx[1]] = leaves[count - 1]; --count; } } // -static btDbvtNode* topdown(btDbvt* pdbvt, - btDbvtNode** leaves, - int count, - int bu_treshold) +static btDbvtNode* topdown(btDbvt* pdbvt, + btDbvtNode** leaves, + int count, + int bu_treshold) { - static const btVector3 axis[]={btVector3(1,0,0), - btVector3(0,1,0), - btVector3(0,0,1)}; - btAssert(bu_treshold>2); - if(count>1) + static const btVector3 axis[] = {btVector3(1, 0, 0), + btVector3(0, 1, 0), + btVector3(0, 0, 1)}; + btAssert(bu_treshold > 2); + if (count > 1) { - 
if(count>bu_treshold) + if (count > bu_treshold) { - const btDbvtVolume vol=bounds(leaves,count); - const btVector3 org=vol.Center(); - int partition; - int bestaxis=-1; - int bestmidp=count; - int splitcount[3][2]={{0,0},{0,0},{0,0}}; + const btDbvtVolume vol = bounds(leaves, count); + const btVector3 org = vol.Center(); + int partition; + int bestaxis = -1; + int bestmidp = count; + int splitcount[3][2] = {{0, 0}, {0, 0}, {0, 0}}; int i; - for( i=0;i<count;++i) + for (i = 0; i < count; ++i) { - const btVector3 x=leaves[i]->volume.Center()-org; - for(int j=0;j<3;++j) + const btVector3 x = leaves[i]->volume.Center() - org; + for (int j = 0; j < 3; ++j) { - ++splitcount[j][btDot(x,axis[j])>0?1:0]; + ++splitcount[j][btDot(x, axis[j]) > 0 ? 1 : 0]; } } - for( i=0;i<3;++i) + for (i = 0; i < 3; ++i) { - if((splitcount[i][0]>0)&&(splitcount[i][1]>0)) + if ((splitcount[i][0] > 0) && (splitcount[i][1] > 0)) { - const int midp=(int)btFabs(btScalar(splitcount[i][0]-splitcount[i][1])); - if(midp<bestmidp) + const int midp = (int)btFabs(btScalar(splitcount[i][0] - splitcount[i][1])); + if (midp < bestmidp) { - bestaxis=i; - bestmidp=midp; + bestaxis = i; + bestmidp = midp; } } } - if(bestaxis>=0) + if (bestaxis >= 0) { - partition=split(leaves,count,org,axis[bestaxis]); - btAssert(partition!=0 && partition!=count); + partition = split(leaves, count, org, axis[bestaxis]); + btAssert(partition != 0 && partition != count); } else { - partition=count/2+1; + partition = count / 2 + 1; } - btDbvtNode* node=createnode(pdbvt,0,vol,0); - node->childs[0]=topdown(pdbvt,&leaves[0],partition,bu_treshold); - node->childs[1]=topdown(pdbvt,&leaves[partition],count-partition,bu_treshold); - node->childs[0]->parent=node; - node->childs[1]->parent=node; - return(node); + btDbvtNode* node = createnode(pdbvt, 0, vol, 0); + node->childs[0] = topdown(pdbvt, &leaves[0], partition, bu_treshold); + node->childs[1] = topdown(pdbvt, &leaves[partition], count - partition, bu_treshold); + node->childs[0]->parent = node; + node->childs[1]->parent = node; + return (node); } else { - bottomup(pdbvt,leaves,count); - return(leaves[0]); + bottomup(pdbvt, leaves, count); + return (leaves[0]); } } - return(leaves[0]); + return (leaves[0]); } // -static DBVT_INLINE btDbvtNode* sort(btDbvtNode* n,btDbvtNode*& r) +static DBVT_INLINE btDbvtNode* sort(btDbvtNode* n, btDbvtNode*& r) { - btDbvtNode* p=n->parent; + btDbvtNode* p = n->parent; btAssert(n->isinternal()); - if(p>n) + if (p > n) { - const int i=indexof(n); - const int j=1-i; - btDbvtNode* s=p->childs[j]; - btDbvtNode* q=p->parent; - btAssert(n==p->childs[i]); - if(q) q->childs[indexof(p)]=n; else r=n; - s->parent=n; - p->parent=n; - n->parent=q; - p->childs[0]=n->childs[0]; - p->childs[1]=n->childs[1]; - n->childs[0]->parent=p; - n->childs[1]->parent=p; - n->childs[i]=p; - n->childs[j]=s; - btSwap(p->volume,n->volume); - return(p); + const int i = indexof(n); + const int j = 1 - i; + btDbvtNode* s = p->childs[j]; + btDbvtNode* q = p->parent; + btAssert(n == p->childs[i]); + if (q) + q->childs[indexof(p)] = n; + else + r = n; + s->parent = n; + p->parent = n; + n->parent = q; + p->childs[0] = n->childs[0]; + p->childs[1] = n->childs[1]; + n->childs[0]->parent = p; + n->childs[1]->parent = p; + n->childs[i] = p; + n->childs[j] = s; + btSwap(p->volume, n->volume); + return (p); } - return(n); + return (n); } #if 0 @@ -442,11 +460,11 @@ static DBVT_INLINE btDbvtNode* walkup(btDbvtNode* n,int count) // btDbvt::btDbvt() { - m_root = 0; - m_free = 0; - m_lkhd = -1; - m_leaves = 0; - m_opath 
= 0; + m_root = 0; + m_free = 0; + m_lkhd = -1; + m_leaves = 0; + m_opath = 0; } // @@ -456,228 +474,233 @@ btDbvt::~btDbvt() } // -void btDbvt::clear() +void btDbvt::clear() { - if(m_root) - recursedeletenode(this,m_root); + if (m_root) + recursedeletenode(this, m_root); btAlignedFree(m_free); - m_free=0; - m_lkhd = -1; + m_free = 0; + m_lkhd = -1; m_stkStack.clear(); - m_opath = 0; - + m_opath = 0; } // -void btDbvt::optimizeBottomUp() +void btDbvt::optimizeBottomUp() { - if(m_root) + if (m_root) { tNodeArray leaves; leaves.reserve(m_leaves); - fetchleaves(this,m_root,leaves); - bottomup(this,&leaves[0],leaves.size()); - m_root=leaves[0]; + fetchleaves(this, m_root, leaves); + bottomup(this, &leaves[0], leaves.size()); + m_root = leaves[0]; } } // -void btDbvt::optimizeTopDown(int bu_treshold) +void btDbvt::optimizeTopDown(int bu_treshold) { - if(m_root) + if (m_root) { - tNodeArray leaves; + tNodeArray leaves; leaves.reserve(m_leaves); - fetchleaves(this,m_root,leaves); - m_root=topdown(this,&leaves[0],leaves.size(),bu_treshold); + fetchleaves(this, m_root, leaves); + m_root = topdown(this, &leaves[0], leaves.size(), bu_treshold); } } // -void btDbvt::optimizeIncremental(int passes) +void btDbvt::optimizeIncremental(int passes) { - if(passes<0) passes=m_leaves; - if(m_root&&(passes>0)) + if (passes < 0) passes = m_leaves; + if (m_root && (passes > 0)) { - do { - btDbvtNode* node=m_root; - unsigned bit=0; - while(node->isinternal()) + do + { + btDbvtNode* node = m_root; + unsigned bit = 0; + while (node->isinternal()) { - node=sort(node,m_root)->childs[(m_opath>>bit)&1]; - bit=(bit+1)&(sizeof(unsigned)*8-1); + node = sort(node, m_root)->childs[(m_opath >> bit) & 1]; + bit = (bit + 1) & (sizeof(unsigned) * 8 - 1); } update(node); ++m_opath; - } while(--passes); + } while (--passes); } } // -btDbvtNode* btDbvt::insert(const btDbvtVolume& volume,void* data) +btDbvtNode* btDbvt::insert(const btDbvtVolume& volume, void* data) { - btDbvtNode* leaf=createnode(this,0,volume,data); - insertleaf(this,m_root,leaf); + btDbvtNode* leaf = createnode(this, 0, volume, data); + insertleaf(this, m_root, leaf); ++m_leaves; - return(leaf); + return (leaf); } // -void btDbvt::update(btDbvtNode* leaf,int lookahead) +void btDbvt::update(btDbvtNode* leaf, int lookahead) { - btDbvtNode* root=removeleaf(this,leaf); - if(root) + btDbvtNode* root = removeleaf(this, leaf); + if (root) { - if(lookahead>=0) + if (lookahead >= 0) { - for(int i=0;(i<lookahead)&&root->parent;++i) + for (int i = 0; (i < lookahead) && root->parent; ++i) { - root=root->parent; + root = root->parent; } - } else root=m_root; + } + else + root = m_root; } - insertleaf(this,root,leaf); + insertleaf(this, root, leaf); } // -void btDbvt::update(btDbvtNode* leaf,btDbvtVolume& volume) +void btDbvt::update(btDbvtNode* leaf, btDbvtVolume& volume) { - btDbvtNode* root=removeleaf(this,leaf); - if(root) + btDbvtNode* root = removeleaf(this, leaf); + if (root) { - if(m_lkhd>=0) + if (m_lkhd >= 0) { - for(int i=0;(i<m_lkhd)&&root->parent;++i) + for (int i = 0; (i < m_lkhd) && root->parent; ++i) { - root=root->parent; + root = root->parent; } - } else root=m_root; + } + else + root = m_root; } - leaf->volume=volume; - insertleaf(this,root,leaf); + leaf->volume = volume; + insertleaf(this, root, leaf); } // -bool btDbvt::update(btDbvtNode* leaf,btDbvtVolume& volume,const btVector3& velocity,btScalar margin) +bool btDbvt::update(btDbvtNode* leaf, btDbvtVolume& volume, const btVector3& velocity, btScalar margin) { - if(leaf->volume.Contain(volume)) 
return(false); - volume.Expand(btVector3(margin,margin,margin)); + if (leaf->volume.Contain(volume)) return (false); + volume.Expand(btVector3(margin, margin, margin)); volume.SignedExpand(velocity); - update(leaf,volume); - return(true); + update(leaf, volume); + return (true); } // -bool btDbvt::update(btDbvtNode* leaf,btDbvtVolume& volume,const btVector3& velocity) +bool btDbvt::update(btDbvtNode* leaf, btDbvtVolume& volume, const btVector3& velocity) { - if(leaf->volume.Contain(volume)) return(false); + if (leaf->volume.Contain(volume)) return (false); volume.SignedExpand(velocity); - update(leaf,volume); - return(true); + update(leaf, volume); + return (true); } // -bool btDbvt::update(btDbvtNode* leaf,btDbvtVolume& volume,btScalar margin) +bool btDbvt::update(btDbvtNode* leaf, btDbvtVolume& volume, btScalar margin) { - if(leaf->volume.Contain(volume)) return(false); - volume.Expand(btVector3(margin,margin,margin)); - update(leaf,volume); - return(true); + if (leaf->volume.Contain(volume)) return (false); + volume.Expand(btVector3(margin, margin, margin)); + update(leaf, volume); + return (true); } // -void btDbvt::remove(btDbvtNode* leaf) +void btDbvt::remove(btDbvtNode* leaf) { - removeleaf(this,leaf); - deletenode(this,leaf); + removeleaf(this, leaf); + deletenode(this, leaf); --m_leaves; } // -void btDbvt::write(IWriter* iwriter) const +void btDbvt::write(IWriter* iwriter) const { - btDbvtNodeEnumerator nodes; - nodes.nodes.reserve(m_leaves*2); - enumNodes(m_root,nodes); - iwriter->Prepare(m_root,nodes.nodes.size()); - for(int i=0;i<nodes.nodes.size();++i) + btDbvtNodeEnumerator nodes; + nodes.nodes.reserve(m_leaves * 2); + enumNodes(m_root, nodes); + iwriter->Prepare(m_root, nodes.nodes.size()); + for (int i = 0; i < nodes.nodes.size(); ++i) { - const btDbvtNode* n=nodes.nodes[i]; - int p=-1; - if(n->parent) p=nodes.nodes.findLinearSearch(n->parent); - if(n->isinternal()) + const btDbvtNode* n = nodes.nodes[i]; + int p = -1; + if (n->parent) p = nodes.nodes.findLinearSearch(n->parent); + if (n->isinternal()) { - const int c0=nodes.nodes.findLinearSearch(n->childs[0]); - const int c1=nodes.nodes.findLinearSearch(n->childs[1]); - iwriter->WriteNode(n,i,p,c0,c1); + const int c0 = nodes.nodes.findLinearSearch(n->childs[0]); + const int c1 = nodes.nodes.findLinearSearch(n->childs[1]); + iwriter->WriteNode(n, i, p, c0, c1); } else { - iwriter->WriteLeaf(n,i,p); - } + iwriter->WriteLeaf(n, i, p); + } } } // -void btDbvt::clone(btDbvt& dest,IClone* iclone) const +void btDbvt::clone(btDbvt& dest, IClone* iclone) const { dest.clear(); - if(m_root!=0) - { - btAlignedObjectArray<sStkCLN> stack; + if (m_root != 0) + { + btAlignedObjectArray<sStkCLN> stack; stack.reserve(m_leaves); - stack.push_back(sStkCLN(m_root,0)); - do { - const int i=stack.size()-1; - const sStkCLN e=stack[i]; - btDbvtNode* n=createnode(&dest,e.parent,e.node->volume,e.node->data); + stack.push_back(sStkCLN(m_root, 0)); + do + { + const int i = stack.size() - 1; + const sStkCLN e = stack[i]; + btDbvtNode* n = createnode(&dest, e.parent, e.node->volume, e.node->data); stack.pop_back(); - if(e.parent!=0) - e.parent->childs[i&1]=n; + if (e.parent != 0) + e.parent->childs[i & 1] = n; else - dest.m_root=n; - if(e.node->isinternal()) + dest.m_root = n; + if (e.node->isinternal()) { - stack.push_back(sStkCLN(e.node->childs[0],n)); - stack.push_back(sStkCLN(e.node->childs[1],n)); + stack.push_back(sStkCLN(e.node->childs[0], n)); + stack.push_back(sStkCLN(e.node->childs[1], n)); } else { iclone->CloneLeaf(n); } - } 
while(stack.size()>0); + } while (stack.size() > 0); } } // -int btDbvt::maxdepth(const btDbvtNode* node) +int btDbvt::maxdepth(const btDbvtNode* node) { - int depth=0; - if(node) getmaxdepth(node,1,depth); - return(depth); + int depth = 0; + if (node) getmaxdepth(node, 1, depth); + return (depth); } // -int btDbvt::countLeaves(const btDbvtNode* node) +int btDbvt::countLeaves(const btDbvtNode* node) { - if(node->isinternal()) - return(countLeaves(node->childs[0])+countLeaves(node->childs[1])); + if (node->isinternal()) + return (countLeaves(node->childs[0]) + countLeaves(node->childs[1])); else - return(1); + return (1); } // -void btDbvt::extractLeaves(const btDbvtNode* node,btAlignedObjectArray<const btDbvtNode*>& leaves) +void btDbvt::extractLeaves(const btDbvtNode* node, btAlignedObjectArray<const btDbvtNode*>& leaves) { - if(node->isinternal()) + if (node->isinternal()) { - extractLeaves(node->childs[0],leaves); - extractLeaves(node->childs[1],leaves); + extractLeaves(node->childs[0], leaves); + extractLeaves(node->childs[1], leaves); } else { leaves.push_back(node); - } + } } // @@ -726,603 +749,608 @@ struct btDbvtBenchmark { struct NilPolicy : btDbvt::ICollide { - NilPolicy() : m_pcount(0),m_depth(-SIMD_INFINITY),m_checksort(true) {} - void Process(const btDbvtNode*,const btDbvtNode*) { ++m_pcount; } - void Process(const btDbvtNode*) { ++m_pcount; } - void Process(const btDbvtNode*,btScalar depth) + NilPolicy() : m_pcount(0), m_depth(-SIMD_INFINITY), m_checksort(true) {} + void Process(const btDbvtNode*, const btDbvtNode*) { ++m_pcount; } + void Process(const btDbvtNode*) { ++m_pcount; } + void Process(const btDbvtNode*, btScalar depth) { ++m_pcount; - if(m_checksort) - { if(depth>=m_depth) m_depth=depth; else printf("wrong depth: %f (should be >= %f)\r\n",depth,m_depth); } + if (m_checksort) + { + if (depth >= m_depth) + m_depth = depth; + else + printf("wrong depth: %f (should be >= %f)\r\n", depth, m_depth); + } } - int m_pcount; - btScalar m_depth; - bool m_checksort; + int m_pcount; + btScalar m_depth; + bool m_checksort; }; struct P14 : btDbvt::ICollide { struct Node { - const btDbvtNode* leaf; - btScalar depth; + const btDbvtNode* leaf; + btScalar depth; }; - void Process(const btDbvtNode* leaf,btScalar depth) + void Process(const btDbvtNode* leaf, btScalar depth) { - Node n; - n.leaf = leaf; - n.depth = depth; + Node n; + n.leaf = leaf; + n.depth = depth; } - static int sortfnc(const Node& a,const Node& b) + static int sortfnc(const Node& a, const Node& b) { - if(a.depth<b.depth) return(+1); - if(a.depth>b.depth) return(-1); - return(0); + if (a.depth < b.depth) return (+1); + if (a.depth > b.depth) return (-1); + return (0); } - btAlignedObjectArray<Node> m_nodes; + btAlignedObjectArray<Node> m_nodes; }; struct P15 : btDbvt::ICollide { struct Node { - const btDbvtNode* leaf; - btScalar depth; + const btDbvtNode* leaf; + btScalar depth; }; void Process(const btDbvtNode* leaf) { - Node n; - n.leaf = leaf; - n.depth = dot(leaf->volume.Center(),m_axis); + Node n; + n.leaf = leaf; + n.depth = dot(leaf->volume.Center(), m_axis); } - static int sortfnc(const Node& a,const Node& b) + static int sortfnc(const Node& a, const Node& b) { - if(a.depth<b.depth) return(+1); - if(a.depth>b.depth) return(-1); - return(0); + if (a.depth < b.depth) return (+1); + if (a.depth > b.depth) return (-1); + return (0); } - btAlignedObjectArray<Node> m_nodes; - btVector3 m_axis; + btAlignedObjectArray<Node> m_nodes; + btVector3 m_axis; }; - static btScalar RandUnit() + static btScalar RandUnit() { - 
return(rand()/(btScalar)RAND_MAX); + return (rand() / (btScalar)RAND_MAX); } - static btVector3 RandVector3() + static btVector3 RandVector3() { - return(btVector3(RandUnit(),RandUnit(),RandUnit())); + return (btVector3(RandUnit(), RandUnit(), RandUnit())); } - static btVector3 RandVector3(btScalar cs) + static btVector3 RandVector3(btScalar cs) { - return(RandVector3()*cs-btVector3(cs,cs,cs)/2); + return (RandVector3() * cs - btVector3(cs, cs, cs) / 2); } - static btDbvtVolume RandVolume(btScalar cs,btScalar eb,btScalar es) + static btDbvtVolume RandVolume(btScalar cs, btScalar eb, btScalar es) { - return(btDbvtVolume::FromCE(RandVector3(cs),btVector3(eb,eb,eb)+RandVector3()*es)); + return (btDbvtVolume::FromCE(RandVector3(cs), btVector3(eb, eb, eb) + RandVector3() * es)); } - static btTransform RandTransform(btScalar cs) + static btTransform RandTransform(btScalar cs) { - btTransform t; + btTransform t; t.setOrigin(RandVector3(cs)); - t.setRotation(btQuaternion(RandUnit()*SIMD_PI*2,RandUnit()*SIMD_PI*2,RandUnit()*SIMD_PI*2).normalized()); - return(t); + t.setRotation(btQuaternion(RandUnit() * SIMD_PI * 2, RandUnit() * SIMD_PI * 2, RandUnit() * SIMD_PI * 2).normalized()); + return (t); } - static void RandTree(btScalar cs,btScalar eb,btScalar es,int leaves,btDbvt& dbvt) + static void RandTree(btScalar cs, btScalar eb, btScalar es, int leaves, btDbvt& dbvt) { dbvt.clear(); - for(int i=0;i<leaves;++i) + for (int i = 0; i < leaves; ++i) { - dbvt.insert(RandVolume(cs,eb,es),0); + dbvt.insert(RandVolume(cs, eb, es), 0); } } }; -void btDbvt::benchmark() +void btDbvt::benchmark() { - static const btScalar cfgVolumeCenterScale = 100; - static const btScalar cfgVolumeExentsBase = 1; - static const btScalar cfgVolumeExentsScale = 4; - static const int cfgLeaves = 8192; - static const bool cfgEnable = true; + static const btScalar cfgVolumeCenterScale = 100; + static const btScalar cfgVolumeExentsBase = 1; + static const btScalar cfgVolumeExentsScale = 4; + static const int cfgLeaves = 8192; + static const bool cfgEnable = true; //[1] btDbvtVolume intersections - bool cfgBenchmark1_Enable = cfgEnable; - static const int cfgBenchmark1_Iterations = 8; - static const int cfgBenchmark1_Reference = 3499; + bool cfgBenchmark1_Enable = cfgEnable; + static const int cfgBenchmark1_Iterations = 8; + static const int cfgBenchmark1_Reference = 3499; //[2] btDbvtVolume merges - bool cfgBenchmark2_Enable = cfgEnable; - static const int cfgBenchmark2_Iterations = 4; - static const int cfgBenchmark2_Reference = 1945; + bool cfgBenchmark2_Enable = cfgEnable; + static const int cfgBenchmark2_Iterations = 4; + static const int cfgBenchmark2_Reference = 1945; //[3] btDbvt::collideTT - bool cfgBenchmark3_Enable = cfgEnable; - static const int cfgBenchmark3_Iterations = 512; - static const int cfgBenchmark3_Reference = 5485; + bool cfgBenchmark3_Enable = cfgEnable; + static const int cfgBenchmark3_Iterations = 512; + static const int cfgBenchmark3_Reference = 5485; //[4] btDbvt::collideTT self - bool cfgBenchmark4_Enable = cfgEnable; - static const int cfgBenchmark4_Iterations = 512; - static const int cfgBenchmark4_Reference = 2814; + bool cfgBenchmark4_Enable = cfgEnable; + static const int cfgBenchmark4_Iterations = 512; + static const int cfgBenchmark4_Reference = 2814; //[5] btDbvt::collideTT xform - bool cfgBenchmark5_Enable = cfgEnable; - static const int cfgBenchmark5_Iterations = 512; - static const btScalar cfgBenchmark5_OffsetScale = 2; - static const int cfgBenchmark5_Reference = 7379; + bool 
cfgBenchmark5_Enable = cfgEnable; + static const int cfgBenchmark5_Iterations = 512; + static const btScalar cfgBenchmark5_OffsetScale = 2; + static const int cfgBenchmark5_Reference = 7379; //[6] btDbvt::collideTT xform,self - bool cfgBenchmark6_Enable = cfgEnable; - static const int cfgBenchmark6_Iterations = 512; - static const btScalar cfgBenchmark6_OffsetScale = 2; - static const int cfgBenchmark6_Reference = 7270; + bool cfgBenchmark6_Enable = cfgEnable; + static const int cfgBenchmark6_Iterations = 512; + static const btScalar cfgBenchmark6_OffsetScale = 2; + static const int cfgBenchmark6_Reference = 7270; //[7] btDbvt::rayTest - bool cfgBenchmark7_Enable = cfgEnable; - static const int cfgBenchmark7_Passes = 32; - static const int cfgBenchmark7_Iterations = 65536; - static const int cfgBenchmark7_Reference = 6307; + bool cfgBenchmark7_Enable = cfgEnable; + static const int cfgBenchmark7_Passes = 32; + static const int cfgBenchmark7_Iterations = 65536; + static const int cfgBenchmark7_Reference = 6307; //[8] insert/remove - bool cfgBenchmark8_Enable = cfgEnable; - static const int cfgBenchmark8_Passes = 32; - static const int cfgBenchmark8_Iterations = 65536; - static const int cfgBenchmark8_Reference = 2105; + bool cfgBenchmark8_Enable = cfgEnable; + static const int cfgBenchmark8_Passes = 32; + static const int cfgBenchmark8_Iterations = 65536; + static const int cfgBenchmark8_Reference = 2105; //[9] updates (teleport) - bool cfgBenchmark9_Enable = cfgEnable; - static const int cfgBenchmark9_Passes = 32; - static const int cfgBenchmark9_Iterations = 65536; - static const int cfgBenchmark9_Reference = 1879; + bool cfgBenchmark9_Enable = cfgEnable; + static const int cfgBenchmark9_Passes = 32; + static const int cfgBenchmark9_Iterations = 65536; + static const int cfgBenchmark9_Reference = 1879; //[10] updates (jitter) - bool cfgBenchmark10_Enable = cfgEnable; - static const btScalar cfgBenchmark10_Scale = cfgVolumeCenterScale/10000; - static const int cfgBenchmark10_Passes = 32; - static const int cfgBenchmark10_Iterations = 65536; - static const int cfgBenchmark10_Reference = 1244; + bool cfgBenchmark10_Enable = cfgEnable; + static const btScalar cfgBenchmark10_Scale = cfgVolumeCenterScale / 10000; + static const int cfgBenchmark10_Passes = 32; + static const int cfgBenchmark10_Iterations = 65536; + static const int cfgBenchmark10_Reference = 1244; //[11] optimize (incremental) - bool cfgBenchmark11_Enable = cfgEnable; - static const int cfgBenchmark11_Passes = 64; - static const int cfgBenchmark11_Iterations = 65536; - static const int cfgBenchmark11_Reference = 2510; + bool cfgBenchmark11_Enable = cfgEnable; + static const int cfgBenchmark11_Passes = 64; + static const int cfgBenchmark11_Iterations = 65536; + static const int cfgBenchmark11_Reference = 2510; //[12] btDbvtVolume notequal - bool cfgBenchmark12_Enable = cfgEnable; - static const int cfgBenchmark12_Iterations = 32; - static const int cfgBenchmark12_Reference = 3677; + bool cfgBenchmark12_Enable = cfgEnable; + static const int cfgBenchmark12_Iterations = 32; + static const int cfgBenchmark12_Reference = 3677; //[13] culling(OCL+fullsort) - bool cfgBenchmark13_Enable = cfgEnable; - static const int cfgBenchmark13_Iterations = 1024; - static const int cfgBenchmark13_Reference = 2231; + bool cfgBenchmark13_Enable = cfgEnable; + static const int cfgBenchmark13_Iterations = 1024; + static const int cfgBenchmark13_Reference = 2231; //[14] culling(OCL+qsort) - bool cfgBenchmark14_Enable = cfgEnable; - static const int 
cfgBenchmark14_Iterations = 8192; - static const int cfgBenchmark14_Reference = 3500; + bool cfgBenchmark14_Enable = cfgEnable; + static const int cfgBenchmark14_Iterations = 8192; + static const int cfgBenchmark14_Reference = 3500; //[15] culling(KDOP+qsort) - bool cfgBenchmark15_Enable = cfgEnable; - static const int cfgBenchmark15_Iterations = 8192; - static const int cfgBenchmark15_Reference = 1151; + bool cfgBenchmark15_Enable = cfgEnable; + static const int cfgBenchmark15_Iterations = 8192; + static const int cfgBenchmark15_Reference = 1151; //[16] insert/remove batch - bool cfgBenchmark16_Enable = cfgEnable; - static const int cfgBenchmark16_BatchCount = 256; - static const int cfgBenchmark16_Passes = 16384; - static const int cfgBenchmark16_Reference = 5138; + bool cfgBenchmark16_Enable = cfgEnable; + static const int cfgBenchmark16_BatchCount = 256; + static const int cfgBenchmark16_Passes = 16384; + static const int cfgBenchmark16_Reference = 5138; //[17] select - bool cfgBenchmark17_Enable = cfgEnable; - static const int cfgBenchmark17_Iterations = 4; - static const int cfgBenchmark17_Reference = 3390; + bool cfgBenchmark17_Enable = cfgEnable; + static const int cfgBenchmark17_Iterations = 4; + static const int cfgBenchmark17_Reference = 3390; - btClock wallclock; + btClock wallclock; printf("Benchmarking dbvt...\r\n"); - printf("\tWorld scale: %f\r\n",cfgVolumeCenterScale); - printf("\tExtents base: %f\r\n",cfgVolumeExentsBase); - printf("\tExtents range: %f\r\n",cfgVolumeExentsScale); - printf("\tLeaves: %u\r\n",cfgLeaves); - printf("\tsizeof(btDbvtVolume): %u bytes\r\n",sizeof(btDbvtVolume)); - printf("\tsizeof(btDbvtNode): %u bytes\r\n",sizeof(btDbvtNode)); - if(cfgBenchmark1_Enable) - {// Benchmark 1 + printf("\tWorld scale: %f\r\n", cfgVolumeCenterScale); + printf("\tExtents base: %f\r\n", cfgVolumeExentsBase); + printf("\tExtents range: %f\r\n", cfgVolumeExentsScale); + printf("\tLeaves: %u\r\n", cfgLeaves); + printf("\tsizeof(btDbvtVolume): %u bytes\r\n", sizeof(btDbvtVolume)); + printf("\tsizeof(btDbvtNode): %u bytes\r\n", sizeof(btDbvtNode)); + if (cfgBenchmark1_Enable) + { // Benchmark 1 srand(380843); - btAlignedObjectArray<btDbvtVolume> volumes; - btAlignedObjectArray<bool> results; + btAlignedObjectArray<btDbvtVolume> volumes; + btAlignedObjectArray<bool> results; volumes.resize(cfgLeaves); results.resize(cfgLeaves); - for(int i=0;i<cfgLeaves;++i) + for (int i = 0; i < cfgLeaves; ++i) { - volumes[i]=btDbvtBenchmark::RandVolume(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale); + volumes[i] = btDbvtBenchmark::RandVolume(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale); } printf("[1] btDbvtVolume intersections: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark1_Iterations;++i) + for (int i = 0; i < cfgBenchmark1_Iterations; ++i) { - for(int j=0;j<cfgLeaves;++j) + for (int j = 0; j < cfgLeaves; ++j) { - for(int k=0;k<cfgLeaves;++k) + for (int k = 0; k < cfgLeaves; ++k) { - results[k]=Intersect(volumes[j],volumes[k]); + results[k] = Intersect(volumes[j], volumes[k]); } } } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark1_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark1_Reference) * 100 / time); } - if(cfgBenchmark2_Enable) - {// Benchmark 2 + if (cfgBenchmark2_Enable) + { // Benchmark 2 srand(380843); - btAlignedObjectArray<btDbvtVolume> volumes; - btAlignedObjectArray<btDbvtVolume> results; + 
btAlignedObjectArray<btDbvtVolume> volumes; + btAlignedObjectArray<btDbvtVolume> results; volumes.resize(cfgLeaves); results.resize(cfgLeaves); - for(int i=0;i<cfgLeaves;++i) + for (int i = 0; i < cfgLeaves; ++i) { - volumes[i]=btDbvtBenchmark::RandVolume(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale); + volumes[i] = btDbvtBenchmark::RandVolume(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale); } printf("[2] btDbvtVolume merges: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark2_Iterations;++i) + for (int i = 0; i < cfgBenchmark2_Iterations; ++i) { - for(int j=0;j<cfgLeaves;++j) + for (int j = 0; j < cfgLeaves; ++j) { - for(int k=0;k<cfgLeaves;++k) + for (int k = 0; k < cfgLeaves; ++k) { - Merge(volumes[j],volumes[k],results[k]); + Merge(volumes[j], volumes[k], results[k]); } } } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark2_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark2_Reference) * 100 / time); } - if(cfgBenchmark3_Enable) - {// Benchmark 3 + if (cfgBenchmark3_Enable) + { // Benchmark 3 srand(380843); - btDbvt dbvt[2]; - btDbvtBenchmark::NilPolicy policy; - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt[0]); - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt[1]); + btDbvt dbvt[2]; + btDbvtBenchmark::NilPolicy policy; + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt[0]); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt[1]); dbvt[0].optimizeTopDown(); dbvt[1].optimizeTopDown(); printf("[3] btDbvt::collideTT: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark3_Iterations;++i) + for (int i = 0; i < cfgBenchmark3_Iterations; ++i) { - btDbvt::collideTT(dbvt[0].m_root,dbvt[1].m_root,policy); + btDbvt::collideTT(dbvt[0].m_root, dbvt[1].m_root, policy); } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark3_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark3_Reference) * 100 / time); } - if(cfgBenchmark4_Enable) - {// Benchmark 4 + if (cfgBenchmark4_Enable) + { // Benchmark 4 srand(380843); - btDbvt dbvt; - btDbvtBenchmark::NilPolicy policy; - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvt dbvt; + btDbvtBenchmark::NilPolicy policy; + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); printf("[4] btDbvt::collideTT self: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark4_Iterations;++i) + for (int i = 0; i < cfgBenchmark4_Iterations; ++i) { - btDbvt::collideTT(dbvt.m_root,dbvt.m_root,policy); + btDbvt::collideTT(dbvt.m_root, dbvt.m_root, policy); } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark4_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark4_Reference) * 100 / time); } - if(cfgBenchmark5_Enable) - {// Benchmark 5 + if (cfgBenchmark5_Enable) + { // Benchmark 5 srand(380843); - btDbvt dbvt[2]; - btAlignedObjectArray<btTransform> transforms; - 
btDbvtBenchmark::NilPolicy policy; + btDbvt dbvt[2]; + btAlignedObjectArray<btTransform> transforms; + btDbvtBenchmark::NilPolicy policy; transforms.resize(cfgBenchmark5_Iterations); - for(int i=0;i<transforms.size();++i) + for (int i = 0; i < transforms.size(); ++i) { - transforms[i]=btDbvtBenchmark::RandTransform(cfgVolumeCenterScale*cfgBenchmark5_OffsetScale); + transforms[i] = btDbvtBenchmark::RandTransform(cfgVolumeCenterScale * cfgBenchmark5_OffsetScale); } - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt[0]); - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt[1]); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt[0]); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt[1]); dbvt[0].optimizeTopDown(); dbvt[1].optimizeTopDown(); printf("[5] btDbvt::collideTT xform: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark5_Iterations;++i) + for (int i = 0; i < cfgBenchmark5_Iterations; ++i) { - btDbvt::collideTT(dbvt[0].m_root,dbvt[1].m_root,transforms[i],policy); + btDbvt::collideTT(dbvt[0].m_root, dbvt[1].m_root, transforms[i], policy); } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark5_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark5_Reference) * 100 / time); } - if(cfgBenchmark6_Enable) - {// Benchmark 6 + if (cfgBenchmark6_Enable) + { // Benchmark 6 srand(380843); - btDbvt dbvt; - btAlignedObjectArray<btTransform> transforms; - btDbvtBenchmark::NilPolicy policy; + btDbvt dbvt; + btAlignedObjectArray<btTransform> transforms; + btDbvtBenchmark::NilPolicy policy; transforms.resize(cfgBenchmark6_Iterations); - for(int i=0;i<transforms.size();++i) + for (int i = 0; i < transforms.size(); ++i) { - transforms[i]=btDbvtBenchmark::RandTransform(cfgVolumeCenterScale*cfgBenchmark6_OffsetScale); + transforms[i] = btDbvtBenchmark::RandTransform(cfgVolumeCenterScale * cfgBenchmark6_OffsetScale); } - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); printf("[6] btDbvt::collideTT xform,self: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark6_Iterations;++i) + for (int i = 0; i < cfgBenchmark6_Iterations; ++i) { - btDbvt::collideTT(dbvt.m_root,dbvt.m_root,transforms[i],policy); + btDbvt::collideTT(dbvt.m_root, dbvt.m_root, transforms[i], policy); } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark6_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark6_Reference) * 100 / time); } - if(cfgBenchmark7_Enable) - {// Benchmark 7 + if (cfgBenchmark7_Enable) + { // Benchmark 7 srand(380843); - btDbvt dbvt; - btAlignedObjectArray<btVector3> rayorg; - btAlignedObjectArray<btVector3> raydir; - btDbvtBenchmark::NilPolicy policy; + btDbvt dbvt; + btAlignedObjectArray<btVector3> rayorg; + btAlignedObjectArray<btVector3> raydir; + btDbvtBenchmark::NilPolicy policy; rayorg.resize(cfgBenchmark7_Iterations); raydir.resize(cfgBenchmark7_Iterations); - for(int i=0;i<rayorg.size();++i) + for (int i = 0; i < rayorg.size(); ++i) { - 
rayorg[i]=btDbvtBenchmark::RandVector3(cfgVolumeCenterScale*2); - raydir[i]=btDbvtBenchmark::RandVector3(cfgVolumeCenterScale*2); + rayorg[i] = btDbvtBenchmark::RandVector3(cfgVolumeCenterScale * 2); + raydir[i] = btDbvtBenchmark::RandVector3(cfgVolumeCenterScale * 2); } - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); printf("[7] btDbvt::rayTest: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark7_Passes;++i) + for (int i = 0; i < cfgBenchmark7_Passes; ++i) { - for(int j=0;j<cfgBenchmark7_Iterations;++j) + for (int j = 0; j < cfgBenchmark7_Iterations; ++j) { - btDbvt::rayTest(dbvt.m_root,rayorg[j],rayorg[j]+raydir[j],policy); + btDbvt::rayTest(dbvt.m_root, rayorg[j], rayorg[j] + raydir[j], policy); } } - const int time=(int)wallclock.getTimeMilliseconds(); - unsigned rays=cfgBenchmark7_Passes*cfgBenchmark7_Iterations; - printf("%u ms (%i%%),(%u r/s)\r\n",time,(time-cfgBenchmark7_Reference)*100/time,(rays*1000)/time); + const int time = (int)wallclock.getTimeMilliseconds(); + unsigned rays = cfgBenchmark7_Passes * cfgBenchmark7_Iterations; + printf("%u ms (%i%%),(%u r/s)\r\n", time, (time - cfgBenchmark7_Reference) * 100 / time, (rays * 1000) / time); } - if(cfgBenchmark8_Enable) - {// Benchmark 8 + if (cfgBenchmark8_Enable) + { // Benchmark 8 srand(380843); - btDbvt dbvt; - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvt dbvt; + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); printf("[8] insert/remove: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark8_Passes;++i) + for (int i = 0; i < cfgBenchmark8_Passes; ++i) { - for(int j=0;j<cfgBenchmark8_Iterations;++j) + for (int j = 0; j < cfgBenchmark8_Iterations; ++j) { - dbvt.remove(dbvt.insert(btDbvtBenchmark::RandVolume(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale),0)); + dbvt.remove(dbvt.insert(btDbvtBenchmark::RandVolume(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale), 0)); } } - const int time=(int)wallclock.getTimeMilliseconds(); - const int ir=cfgBenchmark8_Passes*cfgBenchmark8_Iterations; - printf("%u ms (%i%%),(%u ir/s)\r\n",time,(time-cfgBenchmark8_Reference)*100/time,ir*1000/time); + const int time = (int)wallclock.getTimeMilliseconds(); + const int ir = cfgBenchmark8_Passes * cfgBenchmark8_Iterations; + printf("%u ms (%i%%),(%u ir/s)\r\n", time, (time - cfgBenchmark8_Reference) * 100 / time, ir * 1000 / time); } - if(cfgBenchmark9_Enable) - {// Benchmark 9 + if (cfgBenchmark9_Enable) + { // Benchmark 9 srand(380843); - btDbvt dbvt; - btAlignedObjectArray<const btDbvtNode*> leaves; - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvt dbvt; + btAlignedObjectArray<const btDbvtNode*> leaves; + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); - dbvt.extractLeaves(dbvt.m_root,leaves); + dbvt.extractLeaves(dbvt.m_root, leaves); printf("[9] updates (teleport): "); wallclock.reset(); - for(int i=0;i<cfgBenchmark9_Passes;++i) + for (int i = 0; i < cfgBenchmark9_Passes; ++i) { - for(int j=0;j<cfgBenchmark9_Iterations;++j) + for (int j = 0; j < cfgBenchmark9_Iterations; ++j) { - 
dbvt.update(const_cast<btDbvtNode*>(leaves[rand()%cfgLeaves]), - btDbvtBenchmark::RandVolume(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale)); + dbvt.update(const_cast<btDbvtNode*>(leaves[rand() % cfgLeaves]), + btDbvtBenchmark::RandVolume(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale)); } } - const int time=(int)wallclock.getTimeMilliseconds(); - const int up=cfgBenchmark9_Passes*cfgBenchmark9_Iterations; - printf("%u ms (%i%%),(%u u/s)\r\n",time,(time-cfgBenchmark9_Reference)*100/time,up*1000/time); + const int time = (int)wallclock.getTimeMilliseconds(); + const int up = cfgBenchmark9_Passes * cfgBenchmark9_Iterations; + printf("%u ms (%i%%),(%u u/s)\r\n", time, (time - cfgBenchmark9_Reference) * 100 / time, up * 1000 / time); } - if(cfgBenchmark10_Enable) - {// Benchmark 10 + if (cfgBenchmark10_Enable) + { // Benchmark 10 srand(380843); - btDbvt dbvt; - btAlignedObjectArray<const btDbvtNode*> leaves; - btAlignedObjectArray<btVector3> vectors; + btDbvt dbvt; + btAlignedObjectArray<const btDbvtNode*> leaves; + btAlignedObjectArray<btVector3> vectors; vectors.resize(cfgBenchmark10_Iterations); - for(int i=0;i<vectors.size();++i) + for (int i = 0; i < vectors.size(); ++i) { - vectors[i]=(btDbvtBenchmark::RandVector3()*2-btVector3(1,1,1))*cfgBenchmark10_Scale; + vectors[i] = (btDbvtBenchmark::RandVector3() * 2 - btVector3(1, 1, 1)) * cfgBenchmark10_Scale; } - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); - dbvt.extractLeaves(dbvt.m_root,leaves); + dbvt.extractLeaves(dbvt.m_root, leaves); printf("[10] updates (jitter): "); wallclock.reset(); - for(int i=0;i<cfgBenchmark10_Passes;++i) + for (int i = 0; i < cfgBenchmark10_Passes; ++i) { - for(int j=0;j<cfgBenchmark10_Iterations;++j) - { - const btVector3& d=vectors[j]; - btDbvtNode* l=const_cast<btDbvtNode*>(leaves[rand()%cfgLeaves]); - btDbvtVolume v=btDbvtVolume::FromMM(l->volume.Mins()+d,l->volume.Maxs()+d); - dbvt.update(l,v); + for (int j = 0; j < cfgBenchmark10_Iterations; ++j) + { + const btVector3& d = vectors[j]; + btDbvtNode* l = const_cast<btDbvtNode*>(leaves[rand() % cfgLeaves]); + btDbvtVolume v = btDbvtVolume::FromMM(l->volume.Mins() + d, l->volume.Maxs() + d); + dbvt.update(l, v); } } - const int time=(int)wallclock.getTimeMilliseconds(); - const int up=cfgBenchmark10_Passes*cfgBenchmark10_Iterations; - printf("%u ms (%i%%),(%u u/s)\r\n",time,(time-cfgBenchmark10_Reference)*100/time,up*1000/time); + const int time = (int)wallclock.getTimeMilliseconds(); + const int up = cfgBenchmark10_Passes * cfgBenchmark10_Iterations; + printf("%u ms (%i%%),(%u u/s)\r\n", time, (time - cfgBenchmark10_Reference) * 100 / time, up * 1000 / time); } - if(cfgBenchmark11_Enable) - {// Benchmark 11 + if (cfgBenchmark11_Enable) + { // Benchmark 11 srand(380843); - btDbvt dbvt; - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvt dbvt; + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); printf("[11] optimize (incremental): "); - wallclock.reset(); - for(int i=0;i<cfgBenchmark11_Passes;++i) + wallclock.reset(); + for (int i = 0; i < cfgBenchmark11_Passes; ++i) { dbvt.optimizeIncremental(cfgBenchmark11_Iterations); } - const int time=(int)wallclock.getTimeMilliseconds(); - const int 
op=cfgBenchmark11_Passes*cfgBenchmark11_Iterations; - printf("%u ms (%i%%),(%u o/s)\r\n",time,(time-cfgBenchmark11_Reference)*100/time,op/time*1000); + const int time = (int)wallclock.getTimeMilliseconds(); + const int op = cfgBenchmark11_Passes * cfgBenchmark11_Iterations; + printf("%u ms (%i%%),(%u o/s)\r\n", time, (time - cfgBenchmark11_Reference) * 100 / time, op / time * 1000); } - if(cfgBenchmark12_Enable) - {// Benchmark 12 + if (cfgBenchmark12_Enable) + { // Benchmark 12 srand(380843); - btAlignedObjectArray<btDbvtVolume> volumes; - btAlignedObjectArray<bool> results; + btAlignedObjectArray<btDbvtVolume> volumes; + btAlignedObjectArray<bool> results; volumes.resize(cfgLeaves); results.resize(cfgLeaves); - for(int i=0;i<cfgLeaves;++i) + for (int i = 0; i < cfgLeaves; ++i) { - volumes[i]=btDbvtBenchmark::RandVolume(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale); + volumes[i] = btDbvtBenchmark::RandVolume(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale); } printf("[12] btDbvtVolume notequal: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark12_Iterations;++i) + for (int i = 0; i < cfgBenchmark12_Iterations; ++i) { - for(int j=0;j<cfgLeaves;++j) + for (int j = 0; j < cfgLeaves; ++j) { - for(int k=0;k<cfgLeaves;++k) + for (int k = 0; k < cfgLeaves; ++k) { - results[k]=NotEqual(volumes[j],volumes[k]); + results[k] = NotEqual(volumes[j], volumes[k]); } } } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark12_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark12_Reference) * 100 / time); } - if(cfgBenchmark13_Enable) - {// Benchmark 13 + if (cfgBenchmark13_Enable) + { // Benchmark 13 srand(380843); - btDbvt dbvt; - btAlignedObjectArray<btVector3> vectors; - btDbvtBenchmark::NilPolicy policy; + btDbvt dbvt; + btAlignedObjectArray<btVector3> vectors; + btDbvtBenchmark::NilPolicy policy; vectors.resize(cfgBenchmark13_Iterations); - for(int i=0;i<vectors.size();++i) + for (int i = 0; i < vectors.size(); ++i) { - vectors[i]=(btDbvtBenchmark::RandVector3()*2-btVector3(1,1,1)).normalized(); + vectors[i] = (btDbvtBenchmark::RandVector3() * 2 - btVector3(1, 1, 1)).normalized(); } - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); printf("[13] culling(OCL+fullsort): "); - wallclock.reset(); - for(int i=0;i<cfgBenchmark13_Iterations;++i) + wallclock.reset(); + for (int i = 0; i < cfgBenchmark13_Iterations; ++i) { - static const btScalar offset=0; - policy.m_depth=-SIMD_INFINITY; - dbvt.collideOCL(dbvt.m_root,&vectors[i],&offset,vectors[i],1,policy); + static const btScalar offset = 0; + policy.m_depth = -SIMD_INFINITY; + dbvt.collideOCL(dbvt.m_root, &vectors[i], &offset, vectors[i], 1, policy); } - const int time=(int)wallclock.getTimeMilliseconds(); - const int t=cfgBenchmark13_Iterations; - printf("%u ms (%i%%),(%u t/s)\r\n",time,(time-cfgBenchmark13_Reference)*100/time,(t*1000)/time); + const int time = (int)wallclock.getTimeMilliseconds(); + const int t = cfgBenchmark13_Iterations; + printf("%u ms (%i%%),(%u t/s)\r\n", time, (time - cfgBenchmark13_Reference) * 100 / time, (t * 1000) / time); } - if(cfgBenchmark14_Enable) - {// Benchmark 14 + if (cfgBenchmark14_Enable) + { // Benchmark 14 srand(380843); - btDbvt dbvt; - 
btAlignedObjectArray<btVector3> vectors; - btDbvtBenchmark::P14 policy; + btDbvt dbvt; + btAlignedObjectArray<btVector3> vectors; + btDbvtBenchmark::P14 policy; vectors.resize(cfgBenchmark14_Iterations); - for(int i=0;i<vectors.size();++i) + for (int i = 0; i < vectors.size(); ++i) { - vectors[i]=(btDbvtBenchmark::RandVector3()*2-btVector3(1,1,1)).normalized(); + vectors[i] = (btDbvtBenchmark::RandVector3() * 2 - btVector3(1, 1, 1)).normalized(); } - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); policy.m_nodes.reserve(cfgLeaves); printf("[14] culling(OCL+qsort): "); - wallclock.reset(); - for(int i=0;i<cfgBenchmark14_Iterations;++i) + wallclock.reset(); + for (int i = 0; i < cfgBenchmark14_Iterations; ++i) { - static const btScalar offset=0; + static const btScalar offset = 0; policy.m_nodes.resize(0); - dbvt.collideOCL(dbvt.m_root,&vectors[i],&offset,vectors[i],1,policy,false); + dbvt.collideOCL(dbvt.m_root, &vectors[i], &offset, vectors[i], 1, policy, false); policy.m_nodes.quickSort(btDbvtBenchmark::P14::sortfnc); } - const int time=(int)wallclock.getTimeMilliseconds(); - const int t=cfgBenchmark14_Iterations; - printf("%u ms (%i%%),(%u t/s)\r\n",time,(time-cfgBenchmark14_Reference)*100/time,(t*1000)/time); + const int time = (int)wallclock.getTimeMilliseconds(); + const int t = cfgBenchmark14_Iterations; + printf("%u ms (%i%%),(%u t/s)\r\n", time, (time - cfgBenchmark14_Reference) * 100 / time, (t * 1000) / time); } - if(cfgBenchmark15_Enable) - {// Benchmark 15 + if (cfgBenchmark15_Enable) + { // Benchmark 15 srand(380843); - btDbvt dbvt; - btAlignedObjectArray<btVector3> vectors; - btDbvtBenchmark::P15 policy; + btDbvt dbvt; + btAlignedObjectArray<btVector3> vectors; + btDbvtBenchmark::P15 policy; vectors.resize(cfgBenchmark15_Iterations); - for(int i=0;i<vectors.size();++i) + for (int i = 0; i < vectors.size(); ++i) { - vectors[i]=(btDbvtBenchmark::RandVector3()*2-btVector3(1,1,1)).normalized(); + vectors[i] = (btDbvtBenchmark::RandVector3() * 2 - btVector3(1, 1, 1)).normalized(); } - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); policy.m_nodes.reserve(cfgLeaves); printf("[15] culling(KDOP+qsort): "); - wallclock.reset(); - for(int i=0;i<cfgBenchmark15_Iterations;++i) + wallclock.reset(); + for (int i = 0; i < cfgBenchmark15_Iterations; ++i) { - static const btScalar offset=0; + static const btScalar offset = 0; policy.m_nodes.resize(0); - policy.m_axis=vectors[i]; - dbvt.collideKDOP(dbvt.m_root,&vectors[i],&offset,1,policy); + policy.m_axis = vectors[i]; + dbvt.collideKDOP(dbvt.m_root, &vectors[i], &offset, 1, policy); policy.m_nodes.quickSort(btDbvtBenchmark::P15::sortfnc); } - const int time=(int)wallclock.getTimeMilliseconds(); - const int t=cfgBenchmark15_Iterations; - printf("%u ms (%i%%),(%u t/s)\r\n",time,(time-cfgBenchmark15_Reference)*100/time,(t*1000)/time); + const int time = (int)wallclock.getTimeMilliseconds(); + const int t = cfgBenchmark15_Iterations; + printf("%u ms (%i%%),(%u t/s)\r\n", time, (time - cfgBenchmark15_Reference) * 100 / time, (t * 1000) / time); } - if(cfgBenchmark16_Enable) - {// Benchmark 16 + if (cfgBenchmark16_Enable) + { // Benchmark 16 srand(380843); - btDbvt dbvt; 
- btAlignedObjectArray<btDbvtNode*> batch; - btDbvtBenchmark::RandTree(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale,cfgLeaves,dbvt); + btDbvt dbvt; + btAlignedObjectArray<btDbvtNode*> batch; + btDbvtBenchmark::RandTree(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale, cfgLeaves, dbvt); dbvt.optimizeTopDown(); batch.reserve(cfgBenchmark16_BatchCount); - printf("[16] insert/remove batch(%u): ",cfgBenchmark16_BatchCount); + printf("[16] insert/remove batch(%u): ", cfgBenchmark16_BatchCount); wallclock.reset(); - for(int i=0;i<cfgBenchmark16_Passes;++i) + for (int i = 0; i < cfgBenchmark16_Passes; ++i) { - for(int j=0;j<cfgBenchmark16_BatchCount;++j) + for (int j = 0; j < cfgBenchmark16_BatchCount; ++j) { - batch.push_back(dbvt.insert(btDbvtBenchmark::RandVolume(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale),0)); + batch.push_back(dbvt.insert(btDbvtBenchmark::RandVolume(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale), 0)); } - for(int j=0;j<cfgBenchmark16_BatchCount;++j) + for (int j = 0; j < cfgBenchmark16_BatchCount; ++j) { dbvt.remove(batch[j]); } batch.resize(0); } - const int time=(int)wallclock.getTimeMilliseconds(); - const int ir=cfgBenchmark16_Passes*cfgBenchmark16_BatchCount; - printf("%u ms (%i%%),(%u bir/s)\r\n",time,(time-cfgBenchmark16_Reference)*100/time,int(ir*1000.0/time)); + const int time = (int)wallclock.getTimeMilliseconds(); + const int ir = cfgBenchmark16_Passes * cfgBenchmark16_BatchCount; + printf("%u ms (%i%%),(%u bir/s)\r\n", time, (time - cfgBenchmark16_Reference) * 100 / time, int(ir * 1000.0 / time)); } - if(cfgBenchmark17_Enable) - {// Benchmark 17 + if (cfgBenchmark17_Enable) + { // Benchmark 17 srand(380843); - btAlignedObjectArray<btDbvtVolume> volumes; - btAlignedObjectArray<int> results; - btAlignedObjectArray<int> indices; + btAlignedObjectArray<btDbvtVolume> volumes; + btAlignedObjectArray<int> results; + btAlignedObjectArray<int> indices; volumes.resize(cfgLeaves); results.resize(cfgLeaves); indices.resize(cfgLeaves); - for(int i=0;i<cfgLeaves;++i) + for (int i = 0; i < cfgLeaves; ++i) { - indices[i]=i; - volumes[i]=btDbvtBenchmark::RandVolume(cfgVolumeCenterScale,cfgVolumeExentsBase,cfgVolumeExentsScale); + indices[i] = i; + volumes[i] = btDbvtBenchmark::RandVolume(cfgVolumeCenterScale, cfgVolumeExentsBase, cfgVolumeExentsScale); } - for(int i=0;i<cfgLeaves;++i) + for (int i = 0; i < cfgLeaves; ++i) { - btSwap(indices[i],indices[rand()%cfgLeaves]); + btSwap(indices[i], indices[rand() % cfgLeaves]); } printf("[17] btDbvtVolume select: "); wallclock.reset(); - for(int i=0;i<cfgBenchmark17_Iterations;++i) + for (int i = 0; i < cfgBenchmark17_Iterations; ++i) { - for(int j=0;j<cfgLeaves;++j) + for (int j = 0; j < cfgLeaves; ++j) { - for(int k=0;k<cfgLeaves;++k) + for (int k = 0; k < cfgLeaves; ++k) { - const int idx=indices[k]; - results[idx]=Select(volumes[idx],volumes[j],volumes[k]); + const int idx = indices[k]; + results[idx] = Select(volumes[idx], volumes[j], volumes[k]); } } } - const int time=(int)wallclock.getTimeMilliseconds(); - printf("%u ms (%i%%)\r\n",time,(time-cfgBenchmark17_Reference)*100/time); + const int time = (int)wallclock.getTimeMilliseconds(); + printf("%u ms (%i%%)\r\n", time, (time - cfgBenchmark17_Reference) * 100 / time); } printf("\r\n\r\n"); } diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.h index b5a0014580..a316dbf207 100644 --- 
a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvt.h @@ -26,50 +26,49 @@ subject to the following restrictions: // Compile time configuration // - // Implementation profiles -#define DBVT_IMPL_GENERIC 0 // Generic implementation -#define DBVT_IMPL_SSE 1 // SSE +#define DBVT_IMPL_GENERIC 0 // Generic implementation +#define DBVT_IMPL_SSE 1 // SSE // Template implementation of ICollide #ifdef _WIN32 -#if (defined (_MSC_VER) && _MSC_VER >= 1400) -#define DBVT_USE_TEMPLATE 1 +#if (defined(_MSC_VER) && _MSC_VER >= 1400) +#define DBVT_USE_TEMPLATE 1 #else -#define DBVT_USE_TEMPLATE 0 +#define DBVT_USE_TEMPLATE 0 #endif #else -#define DBVT_USE_TEMPLATE 0 +#define DBVT_USE_TEMPLATE 0 #endif // Use only intrinsics instead of inline asm -#define DBVT_USE_INTRINSIC_SSE 1 +#define DBVT_USE_INTRINSIC_SSE 1 // Using memmov for collideOCL -#define DBVT_USE_MEMMOVE 1 +#define DBVT_USE_MEMMOVE 1 // Enable benchmarking code -#define DBVT_ENABLE_BENCHMARK 0 +#define DBVT_ENABLE_BENCHMARK 0 // Inlining -#define DBVT_INLINE SIMD_FORCE_INLINE +#define DBVT_INLINE SIMD_FORCE_INLINE // Specific methods implementation //SSE gives errors on a MSVC 7.1 -#if defined (BT_USE_SSE) //&& defined (_WIN32) -#define DBVT_SELECT_IMPL DBVT_IMPL_SSE -#define DBVT_MERGE_IMPL DBVT_IMPL_SSE -#define DBVT_INT0_IMPL DBVT_IMPL_SSE +#if defined(BT_USE_SSE) //&& defined (_WIN32) +#define DBVT_SELECT_IMPL DBVT_IMPL_SSE +#define DBVT_MERGE_IMPL DBVT_IMPL_SSE +#define DBVT_INT0_IMPL DBVT_IMPL_SSE #else -#define DBVT_SELECT_IMPL DBVT_IMPL_GENERIC -#define DBVT_MERGE_IMPL DBVT_IMPL_GENERIC -#define DBVT_INT0_IMPL DBVT_IMPL_GENERIC +#define DBVT_SELECT_IMPL DBVT_IMPL_GENERIC +#define DBVT_MERGE_IMPL DBVT_IMPL_GENERIC +#define DBVT_INT0_IMPL DBVT_IMPL_GENERIC #endif -#if (DBVT_SELECT_IMPL==DBVT_IMPL_SSE)|| \ - (DBVT_MERGE_IMPL==DBVT_IMPL_SSE)|| \ - (DBVT_INT0_IMPL==DBVT_IMPL_SSE) +#if (DBVT_SELECT_IMPL == DBVT_IMPL_SSE) || \ + (DBVT_MERGE_IMPL == DBVT_IMPL_SSE) || \ + (DBVT_INT0_IMPL == DBVT_IMPL_SSE) #include <emmintrin.h> #endif @@ -78,21 +77,24 @@ subject to the following restrictions: // #if DBVT_USE_TEMPLATE -#define DBVT_VIRTUAL +#define DBVT_VIRTUAL #define DBVT_VIRTUAL_DTOR(a) -#define DBVT_PREFIX template <typename T> -#define DBVT_IPOLICY T& policy -#define DBVT_CHECKTYPE static const ICollide& typechecker=*(T*)1;(void)typechecker; +#define DBVT_PREFIX template <typename T> +#define DBVT_IPOLICY T& policy +#define DBVT_CHECKTYPE \ + static const ICollide& typechecker = *(T*)1; \ + (void)typechecker; #else -#define DBVT_VIRTUAL_DTOR(a) virtual ~a() {} -#define DBVT_VIRTUAL virtual +#define DBVT_VIRTUAL_DTOR(a) \ + virtual ~a() {} +#define DBVT_VIRTUAL virtual #define DBVT_PREFIX -#define DBVT_IPOLICY ICollide& policy +#define DBVT_IPOLICY ICollide& policy #define DBVT_CHECKTYPE #endif #if DBVT_USE_MEMMOVE -#if !defined( __CELLOS_LV2__) && !defined(__MWERKS__) +#if !defined(__CELLOS_LV2__) && !defined(__MWERKS__) #include <memory.h> #endif #include <string.h> @@ -122,194 +124,193 @@ subject to the following restrictions: #error "DBVT_INT0_IMPL undefined" #endif - // // Defaults volumes // -/* btDbvtAabbMm */ -struct btDbvtAabbMm +/* btDbvtAabbMm */ +struct btDbvtAabbMm { - DBVT_INLINE btVector3 Center() const { return((mi+mx)/2); } - DBVT_INLINE btVector3 Lengths() const { return(mx-mi); } - DBVT_INLINE btVector3 Extents() const { return((mx-mi)/2); } - DBVT_INLINE const btVector3& Mins() const { return(mi); } - DBVT_INLINE const btVector3& Maxs() const 
{ return(mx); } - static inline btDbvtAabbMm FromCE(const btVector3& c,const btVector3& e); - static inline btDbvtAabbMm FromCR(const btVector3& c,btScalar r); - static inline btDbvtAabbMm FromMM(const btVector3& mi,const btVector3& mx); - static inline btDbvtAabbMm FromPoints(const btVector3* pts,int n); - static inline btDbvtAabbMm FromPoints(const btVector3** ppts,int n); - DBVT_INLINE void Expand(const btVector3& e); - DBVT_INLINE void SignedExpand(const btVector3& e); - DBVT_INLINE bool Contain(const btDbvtAabbMm& a) const; - DBVT_INLINE int Classify(const btVector3& n,btScalar o,int s) const; - DBVT_INLINE btScalar ProjectMinimum(const btVector3& v,unsigned signs) const; - DBVT_INLINE friend bool Intersect( const btDbvtAabbMm& a, - const btDbvtAabbMm& b); - - DBVT_INLINE friend bool Intersect( const btDbvtAabbMm& a, - const btVector3& b); - - DBVT_INLINE friend btScalar Proximity( const btDbvtAabbMm& a, - const btDbvtAabbMm& b); - DBVT_INLINE friend int Select( const btDbvtAabbMm& o, - const btDbvtAabbMm& a, - const btDbvtAabbMm& b); - DBVT_INLINE friend void Merge( const btDbvtAabbMm& a, - const btDbvtAabbMm& b, - btDbvtAabbMm& r); - DBVT_INLINE friend bool NotEqual( const btDbvtAabbMm& a, - const btDbvtAabbMm& b); - - DBVT_INLINE btVector3& tMins() { return(mi); } - DBVT_INLINE btVector3& tMaxs() { return(mx); } - + DBVT_INLINE btVector3 Center() const { return ((mi + mx) / 2); } + DBVT_INLINE btVector3 Lengths() const { return (mx - mi); } + DBVT_INLINE btVector3 Extents() const { return ((mx - mi) / 2); } + DBVT_INLINE const btVector3& Mins() const { return (mi); } + DBVT_INLINE const btVector3& Maxs() const { return (mx); } + static inline btDbvtAabbMm FromCE(const btVector3& c, const btVector3& e); + static inline btDbvtAabbMm FromCR(const btVector3& c, btScalar r); + static inline btDbvtAabbMm FromMM(const btVector3& mi, const btVector3& mx); + static inline btDbvtAabbMm FromPoints(const btVector3* pts, int n); + static inline btDbvtAabbMm FromPoints(const btVector3** ppts, int n); + DBVT_INLINE void Expand(const btVector3& e); + DBVT_INLINE void SignedExpand(const btVector3& e); + DBVT_INLINE bool Contain(const btDbvtAabbMm& a) const; + DBVT_INLINE int Classify(const btVector3& n, btScalar o, int s) const; + DBVT_INLINE btScalar ProjectMinimum(const btVector3& v, unsigned signs) const; + DBVT_INLINE friend bool Intersect(const btDbvtAabbMm& a, + const btDbvtAabbMm& b); + + DBVT_INLINE friend bool Intersect(const btDbvtAabbMm& a, + const btVector3& b); + + DBVT_INLINE friend btScalar Proximity(const btDbvtAabbMm& a, + const btDbvtAabbMm& b); + DBVT_INLINE friend int Select(const btDbvtAabbMm& o, + const btDbvtAabbMm& a, + const btDbvtAabbMm& b); + DBVT_INLINE friend void Merge(const btDbvtAabbMm& a, + const btDbvtAabbMm& b, + btDbvtAabbMm& r); + DBVT_INLINE friend bool NotEqual(const btDbvtAabbMm& a, + const btDbvtAabbMm& b); + + DBVT_INLINE btVector3& tMins() { return (mi); } + DBVT_INLINE btVector3& tMaxs() { return (mx); } + private: - DBVT_INLINE void AddSpan(const btVector3& d,btScalar& smi,btScalar& smx) const; + DBVT_INLINE void AddSpan(const btVector3& d, btScalar& smi, btScalar& smx) const; + private: - btVector3 mi,mx; + btVector3 mi, mx; }; -// Types -typedef btDbvtAabbMm btDbvtVolume; +// Types +typedef btDbvtAabbMm btDbvtVolume; -/* btDbvtNode */ -struct btDbvtNode +/* btDbvtNode */ +struct btDbvtNode { - btDbvtVolume volume; - btDbvtNode* parent; - DBVT_INLINE bool isleaf() const { return(childs[1]==0); } - DBVT_INLINE bool isinternal() const { return(!isleaf()); 
} - union - { - btDbvtNode* childs[2]; - void* data; - int dataAsInt; + btDbvtVolume volume; + btDbvtNode* parent; + DBVT_INLINE bool isleaf() const { return (childs[1] == 0); } + DBVT_INLINE bool isinternal() const { return (!isleaf()); } + union { + btDbvtNode* childs[2]; + void* data; + int dataAsInt; }; }; typedef btAlignedObjectArray<const btDbvtNode*> btNodeStack; - ///The btDbvt class implements a fast dynamic bounding volume tree based on axis aligned bounding boxes (aabb tree). ///This btDbvt is used for soft body collision detection and for the btDbvtBroadphase. It has a fast insert, remove and update of nodes. ///Unlike the btQuantizedBvh, nodes can be dynamically moved around, which allows for change in topology of the underlying data structure. -struct btDbvt +struct btDbvt { - /* Stack element */ - struct sStkNN + /* Stack element */ + struct sStkNN { - const btDbvtNode* a; - const btDbvtNode* b; + const btDbvtNode* a; + const btDbvtNode* b; sStkNN() {} - sStkNN(const btDbvtNode* na,const btDbvtNode* nb) : a(na),b(nb) {} + sStkNN(const btDbvtNode* na, const btDbvtNode* nb) : a(na), b(nb) {} }; - struct sStkNP + struct sStkNP { - const btDbvtNode* node; - int mask; - sStkNP(const btDbvtNode* n,unsigned m) : node(n),mask(m) {} + const btDbvtNode* node; + int mask; + sStkNP(const btDbvtNode* n, unsigned m) : node(n), mask(m) {} }; - struct sStkNPS + struct sStkNPS { - const btDbvtNode* node; - int mask; - btScalar value; + const btDbvtNode* node; + int mask; + btScalar value; sStkNPS() {} - sStkNPS(const btDbvtNode* n,unsigned m,btScalar v) : node(n),mask(m),value(v) {} + sStkNPS(const btDbvtNode* n, unsigned m, btScalar v) : node(n), mask(m), value(v) {} }; - struct sStkCLN + struct sStkCLN { - const btDbvtNode* node; - btDbvtNode* parent; - sStkCLN(const btDbvtNode* n,btDbvtNode* p) : node(n),parent(p) {} + const btDbvtNode* node; + btDbvtNode* parent; + sStkCLN(const btDbvtNode* n, btDbvtNode* p) : node(n), parent(p) {} }; // Policies/Interfaces - /* ICollide */ - struct ICollide - { + /* ICollide */ + struct ICollide + { DBVT_VIRTUAL_DTOR(ICollide) - DBVT_VIRTUAL void Process(const btDbvtNode*,const btDbvtNode*) {} - DBVT_VIRTUAL void Process(const btDbvtNode*) {} - DBVT_VIRTUAL void Process(const btDbvtNode* n,btScalar) { Process(n); } - DBVT_VIRTUAL bool Descent(const btDbvtNode*) { return(true); } - DBVT_VIRTUAL bool AllLeaves(const btDbvtNode*) { return(true); } + DBVT_VIRTUAL void Process(const btDbvtNode*, const btDbvtNode*) {} + DBVT_VIRTUAL void Process(const btDbvtNode*) {} + DBVT_VIRTUAL void Process(const btDbvtNode* n, btScalar) { Process(n); } + DBVT_VIRTUAL bool Descent(const btDbvtNode*) { return (true); } + DBVT_VIRTUAL bool AllLeaves(const btDbvtNode*) { return (true); } }; - /* IWriter */ - struct IWriter + /* IWriter */ + struct IWriter { virtual ~IWriter() {} - virtual void Prepare(const btDbvtNode* root,int numnodes)=0; - virtual void WriteNode(const btDbvtNode*,int index,int parent,int child0,int child1)=0; - virtual void WriteLeaf(const btDbvtNode*,int index,int parent)=0; + virtual void Prepare(const btDbvtNode* root, int numnodes) = 0; + virtual void WriteNode(const btDbvtNode*, int index, int parent, int child0, int child1) = 0; + virtual void WriteLeaf(const btDbvtNode*, int index, int parent) = 0; }; - /* IClone */ - struct IClone + /* IClone */ + struct IClone { - virtual ~IClone() {} - virtual void CloneLeaf(btDbvtNode*) {} + virtual ~IClone() {} + virtual void CloneLeaf(btDbvtNode*) {} }; // Constants - enum { - SIMPLE_STACKSIZE = 64, - 
DOUBLE_STACKSIZE = SIMPLE_STACKSIZE*2 + enum + { + SIMPLE_STACKSIZE = 64, + DOUBLE_STACKSIZE = SIMPLE_STACKSIZE * 2 }; // Fields - btDbvtNode* m_root; - btDbvtNode* m_free; - int m_lkhd; - int m_leaves; - unsigned m_opath; - - - btAlignedObjectArray<sStkNN> m_stkStack; + btDbvtNode* m_root; + btDbvtNode* m_free; + int m_lkhd; + int m_leaves; + unsigned m_opath; + btAlignedObjectArray<sStkNN> m_stkStack; // Methods btDbvt(); ~btDbvt(); - void clear(); - bool empty() const { return(0==m_root); } - void optimizeBottomUp(); - void optimizeTopDown(int bu_treshold=128); - void optimizeIncremental(int passes); - btDbvtNode* insert(const btDbvtVolume& box,void* data); - void update(btDbvtNode* leaf,int lookahead=-1); - void update(btDbvtNode* leaf,btDbvtVolume& volume); - bool update(btDbvtNode* leaf,btDbvtVolume& volume,const btVector3& velocity,btScalar margin); - bool update(btDbvtNode* leaf,btDbvtVolume& volume,const btVector3& velocity); - bool update(btDbvtNode* leaf,btDbvtVolume& volume,btScalar margin); - void remove(btDbvtNode* leaf); - void write(IWriter* iwriter) const; - void clone(btDbvt& dest,IClone* iclone=0) const; - static int maxdepth(const btDbvtNode* node); - static int countLeaves(const btDbvtNode* node); - static void extractLeaves(const btDbvtNode* node,btAlignedObjectArray<const btDbvtNode*>& leaves); + void clear(); + bool empty() const { return (0 == m_root); } + void optimizeBottomUp(); + void optimizeTopDown(int bu_treshold = 128); + void optimizeIncremental(int passes); + btDbvtNode* insert(const btDbvtVolume& box, void* data); + void update(btDbvtNode* leaf, int lookahead = -1); + void update(btDbvtNode* leaf, btDbvtVolume& volume); + bool update(btDbvtNode* leaf, btDbvtVolume& volume, const btVector3& velocity, btScalar margin); + bool update(btDbvtNode* leaf, btDbvtVolume& volume, const btVector3& velocity); + bool update(btDbvtNode* leaf, btDbvtVolume& volume, btScalar margin); + void remove(btDbvtNode* leaf); + void write(IWriter* iwriter) const; + void clone(btDbvt& dest, IClone* iclone = 0) const; + static int maxdepth(const btDbvtNode* node); + static int countLeaves(const btDbvtNode* node); + static void extractLeaves(const btDbvtNode* node, btAlignedObjectArray<const btDbvtNode*>& leaves); #if DBVT_ENABLE_BENCHMARK - static void benchmark(); + static void benchmark(); #else - static void benchmark(){} + static void benchmark() + { + } #endif // DBVT_IPOLICY must support ICollide policy/interface DBVT_PREFIX - static void enumNodes( const btDbvtNode* root, - DBVT_IPOLICY); + static void enumNodes(const btDbvtNode* root, + DBVT_IPOLICY); DBVT_PREFIX - static void enumLeaves( const btDbvtNode* root, - DBVT_IPOLICY); + static void enumLeaves(const btDbvtNode* root, + DBVT_IPOLICY); DBVT_PREFIX - void collideTT( const btDbvtNode* root0, - const btDbvtNode* root1, - DBVT_IPOLICY); + void collideTT(const btDbvtNode* root0, + const btDbvtNode* root1, + DBVT_IPOLICY); DBVT_PREFIX - void collideTTpersistentStack( const btDbvtNode* root0, - const btDbvtNode* root1, - DBVT_IPOLICY); + void collideTTpersistentStack(const btDbvtNode* root0, + const btDbvtNode* root1, + DBVT_IPOLICY); #if 0 DBVT_PREFIX void collideTT( const btDbvtNode* root0, @@ -325,82 +326,89 @@ struct btDbvt #endif DBVT_PREFIX - void collideTV( const btDbvtNode* root, - const btDbvtVolume& volume, - DBVT_IPOLICY) const; - + void collideTV(const btDbvtNode* root, + const btDbvtVolume& volume, + DBVT_IPOLICY) const; + DBVT_PREFIX - void collideTVNoStackAlloc( const btDbvtNode* root, - const btDbvtVolume& 
volume, - btNodeStack& stack, - DBVT_IPOLICY) const; - - - - + void collideTVNoStackAlloc(const btDbvtNode* root, + const btDbvtVolume& volume, + btNodeStack& stack, + DBVT_IPOLICY) const; + ///rayTest is a re-entrant ray test, and can be called in parallel as long as the btAlignedAlloc is thread-safe (uses locking etc) ///rayTest is slower than rayTestInternal, because it builds a local stack, using memory allocations, and it recomputes signs/rayDirectionInverses each time DBVT_PREFIX - static void rayTest( const btDbvtNode* root, - const btVector3& rayFrom, - const btVector3& rayTo, - DBVT_IPOLICY); + static void rayTest(const btDbvtNode* root, + const btVector3& rayFrom, + const btVector3& rayTo, + DBVT_IPOLICY); ///rayTestInternal is faster than rayTest, because it uses a persistent stack (to reduce dynamic memory allocations to a minimum) and it uses precomputed signs/rayInverseDirections ///rayTestInternal is used by btDbvtBroadphase to accelerate world ray casts DBVT_PREFIX - void rayTestInternal( const btDbvtNode* root, - const btVector3& rayFrom, - const btVector3& rayTo, - const btVector3& rayDirectionInverse, - unsigned int signs[3], - btScalar lambda_max, - const btVector3& aabbMin, - const btVector3& aabbMax, - btAlignedObjectArray<const btDbvtNode*>& stack, - DBVT_IPOLICY) const; + void rayTestInternal(const btDbvtNode* root, + const btVector3& rayFrom, + const btVector3& rayTo, + const btVector3& rayDirectionInverse, + unsigned int signs[3], + btScalar lambda_max, + const btVector3& aabbMin, + const btVector3& aabbMax, + btAlignedObjectArray<const btDbvtNode*>& stack, + DBVT_IPOLICY) const; DBVT_PREFIX - static void collideKDOP(const btDbvtNode* root, - const btVector3* normals, - const btScalar* offsets, - int count, - DBVT_IPOLICY); + static void collideKDOP(const btDbvtNode* root, + const btVector3* normals, + const btScalar* offsets, + int count, + DBVT_IPOLICY); DBVT_PREFIX - static void collideOCL( const btDbvtNode* root, - const btVector3* normals, - const btScalar* offsets, - const btVector3& sortaxis, - int count, - DBVT_IPOLICY, - bool fullsort=true); + static void collideOCL(const btDbvtNode* root, + const btVector3* normals, + const btScalar* offsets, + const btVector3& sortaxis, + int count, + DBVT_IPOLICY, + bool fullsort = true); DBVT_PREFIX - static void collideTU( const btDbvtNode* root, - DBVT_IPOLICY); - // Helpers - static DBVT_INLINE int nearest(const int* i,const btDbvt::sStkNPS* a,btScalar v,int l,int h) + static void collideTU(const btDbvtNode* root, + DBVT_IPOLICY); + // Helpers + static DBVT_INLINE int nearest(const int* i, const btDbvt::sStkNPS* a, btScalar v, int l, int h) { - int m=0; - while(l<h) + int m = 0; + while (l < h) { - m=(l+h)>>1; - if(a[i[m]].value>=v) l=m+1; else h=m; + m = (l + h) >> 1; + if (a[i[m]].value >= v) + l = m + 1; + else + h = m; } - return(h); + return (h); } - static DBVT_INLINE int allocate( btAlignedObjectArray<int>& ifree, - btAlignedObjectArray<sStkNPS>& stock, - const sStkNPS& value) + static DBVT_INLINE int allocate(btAlignedObjectArray<int>& ifree, + btAlignedObjectArray<sStkNPS>& stock, + const sStkNPS& value) { - int i; - if(ifree.size()>0) - { i=ifree[ifree.size()-1];ifree.pop_back();stock[i]=value; } + int i; + if (ifree.size() > 0) + { + i = ifree[ifree.size() - 1]; + ifree.pop_back(); + stock[i] = value; + } else - { i=stock.size();stock.push_back(value); } - return(i); + { + i = stock.size(); + stock.push_back(value); + } + return (i); } // private: - btDbvt(const btDbvt&) {} + btDbvt(const btDbvt&) {} }; 
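// ----------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the upstream patch): the hunk above
// ends the reformatted btDbvt interface. The example below shows, under stated
// assumptions, how that interface is typically driven -- inserting leaves,
// re-fitting one with update(), and running an overlap query through a small
// ICollide policy via collideTV(). The names OverlapCounter and dbvtUsageSketch,
// and the include path, are invented for this sketch only; see the Bullet
// headers for the authoritative API.

#include "BulletCollision/BroadphaseCollision/btDbvt.h"  // assumed include path

struct OverlapCounter : btDbvt::ICollide
{
	OverlapCounter() : m_hits(0) {}
	void Process(const btDbvtNode* leaf)  // invoked once per overlapping leaf
	{
		(void)leaf;
		++m_hits;
	}
	int m_hits;
};

static void dbvtUsageSketch()
{
	btDbvt tree;

	// Insert two leaves; the void* payload normally points at user data (e.g. a proxy).
	btDbvtNode* a = tree.insert(btDbvtVolume::FromMM(btVector3(0, 0, 0), btVector3(1, 1, 1)), 0);
	btDbvtNode* b = tree.insert(btDbvtVolume::FromMM(btVector3(2, 0, 0), btVector3(3, 1, 1)), 0);

	// Move leaf 'a': build its new volume and let update() re-insert it into the tree.
	btDbvtVolume moved = btDbvtVolume::FromMM(btVector3(1, 0, 0), btVector3(2, 1, 1));
	tree.update(a, moved);

	// Report every leaf whose volume intersects the query box (center/extents form).
	OverlapCounter counter;
	tree.collideTV(tree.m_root, btDbvtVolume::FromCE(btVector3(2, 0, 0), btVector3(1, 1, 1)), counter);

	tree.remove(b);  // remaining leaves are released by clear()/~btDbvt()
}
// ----------------------------------------------------------------------------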
// @@ -408,227 +416,252 @@ private: // // -inline btDbvtAabbMm btDbvtAabbMm::FromCE(const btVector3& c,const btVector3& e) +inline btDbvtAabbMm btDbvtAabbMm::FromCE(const btVector3& c, const btVector3& e) { btDbvtAabbMm box; - box.mi=c-e;box.mx=c+e; - return(box); + box.mi = c - e; + box.mx = c + e; + return (box); } // -inline btDbvtAabbMm btDbvtAabbMm::FromCR(const btVector3& c,btScalar r) +inline btDbvtAabbMm btDbvtAabbMm::FromCR(const btVector3& c, btScalar r) { - return(FromCE(c,btVector3(r,r,r))); + return (FromCE(c, btVector3(r, r, r))); } // -inline btDbvtAabbMm btDbvtAabbMm::FromMM(const btVector3& mi,const btVector3& mx) +inline btDbvtAabbMm btDbvtAabbMm::FromMM(const btVector3& mi, const btVector3& mx) { btDbvtAabbMm box; - box.mi=mi;box.mx=mx; - return(box); + box.mi = mi; + box.mx = mx; + return (box); } // -inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3* pts,int n) +inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3* pts, int n) { btDbvtAabbMm box; - box.mi=box.mx=pts[0]; - for(int i=1;i<n;++i) + box.mi = box.mx = pts[0]; + for (int i = 1; i < n; ++i) { box.mi.setMin(pts[i]); box.mx.setMax(pts[i]); } - return(box); + return (box); } // -inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3** ppts,int n) +inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3** ppts, int n) { btDbvtAabbMm box; - box.mi=box.mx=*ppts[0]; - for(int i=1;i<n;++i) + box.mi = box.mx = *ppts[0]; + for (int i = 1; i < n; ++i) { box.mi.setMin(*ppts[i]); box.mx.setMax(*ppts[i]); } - return(box); + return (box); } // -DBVT_INLINE void btDbvtAabbMm::Expand(const btVector3& e) +DBVT_INLINE void btDbvtAabbMm::Expand(const btVector3& e) { - mi-=e;mx+=e; + mi -= e; + mx += e; } // -DBVT_INLINE void btDbvtAabbMm::SignedExpand(const btVector3& e) +DBVT_INLINE void btDbvtAabbMm::SignedExpand(const btVector3& e) { - if(e.x()>0) mx.setX(mx.x()+e[0]); else mi.setX(mi.x()+e[0]); - if(e.y()>0) mx.setY(mx.y()+e[1]); else mi.setY(mi.y()+e[1]); - if(e.z()>0) mx.setZ(mx.z()+e[2]); else mi.setZ(mi.z()+e[2]); + if (e.x() > 0) + mx.setX(mx.x() + e[0]); + else + mi.setX(mi.x() + e[0]); + if (e.y() > 0) + mx.setY(mx.y() + e[1]); + else + mi.setY(mi.y() + e[1]); + if (e.z() > 0) + mx.setZ(mx.z() + e[2]); + else + mi.setZ(mi.z() + e[2]); } // -DBVT_INLINE bool btDbvtAabbMm::Contain(const btDbvtAabbMm& a) const +DBVT_INLINE bool btDbvtAabbMm::Contain(const btDbvtAabbMm& a) const { - return( (mi.x()<=a.mi.x())&& - (mi.y()<=a.mi.y())&& - (mi.z()<=a.mi.z())&& - (mx.x()>=a.mx.x())&& - (mx.y()>=a.mx.y())&& - (mx.z()>=a.mx.z())); + return ((mi.x() <= a.mi.x()) && + (mi.y() <= a.mi.y()) && + (mi.z() <= a.mi.z()) && + (mx.x() >= a.mx.x()) && + (mx.y() >= a.mx.y()) && + (mx.z() >= a.mx.z())); } // -DBVT_INLINE int btDbvtAabbMm::Classify(const btVector3& n,btScalar o,int s) const +DBVT_INLINE int btDbvtAabbMm::Classify(const btVector3& n, btScalar o, int s) const { - btVector3 pi,px; - switch(s) + btVector3 pi, px; + switch (s) { - case (0+0+0): px=btVector3(mi.x(),mi.y(),mi.z()); - pi=btVector3(mx.x(),mx.y(),mx.z());break; - case (1+0+0): px=btVector3(mx.x(),mi.y(),mi.z()); - pi=btVector3(mi.x(),mx.y(),mx.z());break; - case (0+2+0): px=btVector3(mi.x(),mx.y(),mi.z()); - pi=btVector3(mx.x(),mi.y(),mx.z());break; - case (1+2+0): px=btVector3(mx.x(),mx.y(),mi.z()); - pi=btVector3(mi.x(),mi.y(),mx.z());break; - case (0+0+4): px=btVector3(mi.x(),mi.y(),mx.z()); - pi=btVector3(mx.x(),mx.y(),mi.z());break; - case (1+0+4): px=btVector3(mx.x(),mi.y(),mx.z()); - pi=btVector3(mi.x(),mx.y(),mi.z());break; - case 
(0+2+4): px=btVector3(mi.x(),mx.y(),mx.z()); - pi=btVector3(mx.x(),mi.y(),mi.z());break; - case (1+2+4): px=btVector3(mx.x(),mx.y(),mx.z()); - pi=btVector3(mi.x(),mi.y(),mi.z());break; + case (0 + 0 + 0): + px = btVector3(mi.x(), mi.y(), mi.z()); + pi = btVector3(mx.x(), mx.y(), mx.z()); + break; + case (1 + 0 + 0): + px = btVector3(mx.x(), mi.y(), mi.z()); + pi = btVector3(mi.x(), mx.y(), mx.z()); + break; + case (0 + 2 + 0): + px = btVector3(mi.x(), mx.y(), mi.z()); + pi = btVector3(mx.x(), mi.y(), mx.z()); + break; + case (1 + 2 + 0): + px = btVector3(mx.x(), mx.y(), mi.z()); + pi = btVector3(mi.x(), mi.y(), mx.z()); + break; + case (0 + 0 + 4): + px = btVector3(mi.x(), mi.y(), mx.z()); + pi = btVector3(mx.x(), mx.y(), mi.z()); + break; + case (1 + 0 + 4): + px = btVector3(mx.x(), mi.y(), mx.z()); + pi = btVector3(mi.x(), mx.y(), mi.z()); + break; + case (0 + 2 + 4): + px = btVector3(mi.x(), mx.y(), mx.z()); + pi = btVector3(mx.x(), mi.y(), mi.z()); + break; + case (1 + 2 + 4): + px = btVector3(mx.x(), mx.y(), mx.z()); + pi = btVector3(mi.x(), mi.y(), mi.z()); + break; } - if((btDot(n,px)+o)<0) return(-1); - if((btDot(n,pi)+o)>=0) return(+1); - return(0); + if ((btDot(n, px) + o) < 0) return (-1); + if ((btDot(n, pi) + o) >= 0) return (+1); + return (0); } // -DBVT_INLINE btScalar btDbvtAabbMm::ProjectMinimum(const btVector3& v,unsigned signs) const +DBVT_INLINE btScalar btDbvtAabbMm::ProjectMinimum(const btVector3& v, unsigned signs) const { - const btVector3* b[]={&mx,&mi}; - const btVector3 p( b[(signs>>0)&1]->x(), - b[(signs>>1)&1]->y(), - b[(signs>>2)&1]->z()); - return(btDot(p,v)); + const btVector3* b[] = {&mx, &mi}; + const btVector3 p(b[(signs >> 0) & 1]->x(), + b[(signs >> 1) & 1]->y(), + b[(signs >> 2) & 1]->z()); + return (btDot(p, v)); } // -DBVT_INLINE void btDbvtAabbMm::AddSpan(const btVector3& d,btScalar& smi,btScalar& smx) const +DBVT_INLINE void btDbvtAabbMm::AddSpan(const btVector3& d, btScalar& smi, btScalar& smx) const { - for(int i=0;i<3;++i) + for (int i = 0; i < 3; ++i) { - if(d[i]<0) - { smi+=mx[i]*d[i];smx+=mi[i]*d[i]; } + if (d[i] < 0) + { + smi += mx[i] * d[i]; + smx += mi[i] * d[i]; + } else - { smi+=mi[i]*d[i];smx+=mx[i]*d[i]; } + { + smi += mi[i] * d[i]; + smx += mx[i] * d[i]; + } } } // -DBVT_INLINE bool Intersect( const btDbvtAabbMm& a, - const btDbvtAabbMm& b) +DBVT_INLINE bool Intersect(const btDbvtAabbMm& a, + const btDbvtAabbMm& b) { -#if DBVT_INT0_IMPL == DBVT_IMPL_SSE - const __m128 rt(_mm_or_ps( _mm_cmplt_ps(_mm_load_ps(b.mx),_mm_load_ps(a.mi)), - _mm_cmplt_ps(_mm_load_ps(a.mx),_mm_load_ps(b.mi)))); -#if defined (_WIN32) - const __int32* pu((const __int32*)&rt); +#if DBVT_INT0_IMPL == DBVT_IMPL_SSE + const __m128 rt(_mm_or_ps(_mm_cmplt_ps(_mm_load_ps(b.mx), _mm_load_ps(a.mi)), + _mm_cmplt_ps(_mm_load_ps(a.mx), _mm_load_ps(b.mi)))); +#if defined(_WIN32) + const __int32* pu((const __int32*)&rt); #else - const int* pu((const int*)&rt); + const int* pu((const int*)&rt); #endif - return((pu[0]|pu[1]|pu[2])==0); + return ((pu[0] | pu[1] | pu[2]) == 0); #else - return( (a.mi.x()<=b.mx.x())&& - (a.mx.x()>=b.mi.x())&& - (a.mi.y()<=b.mx.y())&& - (a.mx.y()>=b.mi.y())&& - (a.mi.z()<=b.mx.z())&& - (a.mx.z()>=b.mi.z())); + return ((a.mi.x() <= b.mx.x()) && + (a.mx.x() >= b.mi.x()) && + (a.mi.y() <= b.mx.y()) && + (a.mx.y() >= b.mi.y()) && + (a.mi.z() <= b.mx.z()) && + (a.mx.z() >= b.mi.z())); #endif } - - // -DBVT_INLINE bool Intersect( const btDbvtAabbMm& a, - const btVector3& b) +DBVT_INLINE bool Intersect(const btDbvtAabbMm& a, + const btVector3& b) { - 
return( (b.x()>=a.mi.x())&& - (b.y()>=a.mi.y())&& - (b.z()>=a.mi.z())&& - (b.x()<=a.mx.x())&& - (b.y()<=a.mx.y())&& - (b.z()<=a.mx.z())); + return ((b.x() >= a.mi.x()) && + (b.y() >= a.mi.y()) && + (b.z() >= a.mi.z()) && + (b.x() <= a.mx.x()) && + (b.y() <= a.mx.y()) && + (b.z() <= a.mx.z())); } - - - - ////////////////////////////////////// - // -DBVT_INLINE btScalar Proximity( const btDbvtAabbMm& a, - const btDbvtAabbMm& b) +DBVT_INLINE btScalar Proximity(const btDbvtAabbMm& a, + const btDbvtAabbMm& b) { - const btVector3 d=(a.mi+a.mx)-(b.mi+b.mx); - return(btFabs(d.x())+btFabs(d.y())+btFabs(d.z())); + const btVector3 d = (a.mi + a.mx) - (b.mi + b.mx); + return (btFabs(d.x()) + btFabs(d.y()) + btFabs(d.z())); } - - // -DBVT_INLINE int Select( const btDbvtAabbMm& o, - const btDbvtAabbMm& a, - const btDbvtAabbMm& b) +DBVT_INLINE int Select(const btDbvtAabbMm& o, + const btDbvtAabbMm& a, + const btDbvtAabbMm& b) { -#if DBVT_SELECT_IMPL == DBVT_IMPL_SSE - -#if defined (_WIN32) - static ATTRIBUTE_ALIGNED16(const unsigned __int32) mask[]={0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff}; +#if DBVT_SELECT_IMPL == DBVT_IMPL_SSE + +#if defined(_WIN32) + static ATTRIBUTE_ALIGNED16(const unsigned __int32) mask[] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff}; #else - static ATTRIBUTE_ALIGNED16(const unsigned int) mask[]={0x7fffffff,0x7fffffff,0x7fffffff,0x00000000 /*0x7fffffff*/}; + static ATTRIBUTE_ALIGNED16(const unsigned int) mask[] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x00000000 /*0x7fffffff*/}; #endif ///@todo: the intrinsic version is 11% slower #if DBVT_USE_INTRINSIC_SSE - union btSSEUnion ///NOTE: if we use more intrinsics, move btSSEUnion into the LinearMath directory + union btSSEUnion ///NOTE: if we use more intrinsics, move btSSEUnion into the LinearMath directory { - __m128 ssereg; - float floats[4]; - int ints[4]; + __m128 ssereg; + float floats[4]; + int ints[4]; }; - __m128 omi(_mm_load_ps(o.mi)); - omi=_mm_add_ps(omi,_mm_load_ps(o.mx)); - __m128 ami(_mm_load_ps(a.mi)); - ami=_mm_add_ps(ami,_mm_load_ps(a.mx)); - ami=_mm_sub_ps(ami,omi); - ami=_mm_and_ps(ami,_mm_load_ps((const float*)mask)); - __m128 bmi(_mm_load_ps(b.mi)); - bmi=_mm_add_ps(bmi,_mm_load_ps(b.mx)); - bmi=_mm_sub_ps(bmi,omi); - bmi=_mm_and_ps(bmi,_mm_load_ps((const float*)mask)); - __m128 t0(_mm_movehl_ps(ami,ami)); - ami=_mm_add_ps(ami,t0); - ami=_mm_add_ss(ami,_mm_shuffle_ps(ami,ami,1)); - __m128 t1(_mm_movehl_ps(bmi,bmi)); - bmi=_mm_add_ps(bmi,t1); - bmi=_mm_add_ss(bmi,_mm_shuffle_ps(bmi,bmi,1)); - + __m128 omi(_mm_load_ps(o.mi)); + omi = _mm_add_ps(omi, _mm_load_ps(o.mx)); + __m128 ami(_mm_load_ps(a.mi)); + ami = _mm_add_ps(ami, _mm_load_ps(a.mx)); + ami = _mm_sub_ps(ami, omi); + ami = _mm_and_ps(ami, _mm_load_ps((const float*)mask)); + __m128 bmi(_mm_load_ps(b.mi)); + bmi = _mm_add_ps(bmi, _mm_load_ps(b.mx)); + bmi = _mm_sub_ps(bmi, omi); + bmi = _mm_and_ps(bmi, _mm_load_ps((const float*)mask)); + __m128 t0(_mm_movehl_ps(ami, ami)); + ami = _mm_add_ps(ami, t0); + ami = _mm_add_ss(ami, _mm_shuffle_ps(ami, ami, 1)); + __m128 t1(_mm_movehl_ps(bmi, bmi)); + bmi = _mm_add_ps(bmi, t1); + bmi = _mm_add_ss(bmi, _mm_shuffle_ps(bmi, bmi, 1)); + btSSEUnion tmp; - tmp.ssereg = _mm_cmple_ss(bmi,ami); - return tmp.ints[0]&1; + tmp.ssereg = _mm_cmple_ss(bmi, ami); + return tmp.ints[0] & 1; #else - ATTRIBUTE_ALIGNED16(__int32 r[1]); + ATTRIBUTE_ALIGNED16(__int32 r[1]); __asm { mov eax,o @@ -656,46 +689,52 @@ DBVT_INLINE int Select( const btDbvtAabbMm& o, cmpless xmm2,xmm1 movss r,xmm2 } - return(r[0]&1); + return (r[0] & 1); 
#endif #else - return(Proximity(o,a)<Proximity(o,b)?0:1); + return (Proximity(o, a) < Proximity(o, b) ? 0 : 1); #endif } // -DBVT_INLINE void Merge( const btDbvtAabbMm& a, - const btDbvtAabbMm& b, - btDbvtAabbMm& r) +DBVT_INLINE void Merge(const btDbvtAabbMm& a, + const btDbvtAabbMm& b, + btDbvtAabbMm& r) { -#if DBVT_MERGE_IMPL==DBVT_IMPL_SSE - __m128 ami(_mm_load_ps(a.mi)); - __m128 amx(_mm_load_ps(a.mx)); - __m128 bmi(_mm_load_ps(b.mi)); - __m128 bmx(_mm_load_ps(b.mx)); - ami=_mm_min_ps(ami,bmi); - amx=_mm_max_ps(amx,bmx); - _mm_store_ps(r.mi,ami); - _mm_store_ps(r.mx,amx); +#if DBVT_MERGE_IMPL == DBVT_IMPL_SSE + __m128 ami(_mm_load_ps(a.mi)); + __m128 amx(_mm_load_ps(a.mx)); + __m128 bmi(_mm_load_ps(b.mi)); + __m128 bmx(_mm_load_ps(b.mx)); + ami = _mm_min_ps(ami, bmi); + amx = _mm_max_ps(amx, bmx); + _mm_store_ps(r.mi, ami); + _mm_store_ps(r.mx, amx); #else - for(int i=0;i<3;++i) + for (int i = 0; i < 3; ++i) { - if(a.mi[i]<b.mi[i]) r.mi[i]=a.mi[i]; else r.mi[i]=b.mi[i]; - if(a.mx[i]>b.mx[i]) r.mx[i]=a.mx[i]; else r.mx[i]=b.mx[i]; + if (a.mi[i] < b.mi[i]) + r.mi[i] = a.mi[i]; + else + r.mi[i] = b.mi[i]; + if (a.mx[i] > b.mx[i]) + r.mx[i] = a.mx[i]; + else + r.mx[i] = b.mx[i]; } #endif } // -DBVT_INLINE bool NotEqual( const btDbvtAabbMm& a, - const btDbvtAabbMm& b) +DBVT_INLINE bool NotEqual(const btDbvtAabbMm& a, + const btDbvtAabbMm& b) { - return( (a.mi.x()!=b.mi.x())|| - (a.mi.y()!=b.mi.y())|| - (a.mi.z()!=b.mi.z())|| - (a.mx.x()!=b.mx.x())|| - (a.mx.y()!=b.mx.y())|| - (a.mx.z()!=b.mx.z())); + return ((a.mi.x() != b.mi.x()) || + (a.mi.y() != b.mi.y()) || + (a.mi.z() != b.mi.z()) || + (a.mx.x() != b.mx.x()) || + (a.mx.y() != b.mx.y()) || + (a.mx.z() != b.mx.z())); } // @@ -704,162 +743,162 @@ DBVT_INLINE bool NotEqual( const btDbvtAabbMm& a, // DBVT_PREFIX -inline void btDbvt::enumNodes( const btDbvtNode* root, - DBVT_IPOLICY) +inline void btDbvt::enumNodes(const btDbvtNode* root, + DBVT_IPOLICY) { DBVT_CHECKTYPE - policy.Process(root); - if(root->isinternal()) + policy.Process(root); + if (root->isinternal()) { - enumNodes(root->childs[0],policy); - enumNodes(root->childs[1],policy); + enumNodes(root->childs[0], policy); + enumNodes(root->childs[1], policy); } } // DBVT_PREFIX -inline void btDbvt::enumLeaves( const btDbvtNode* root, - DBVT_IPOLICY) +inline void btDbvt::enumLeaves(const btDbvtNode* root, + DBVT_IPOLICY) { DBVT_CHECKTYPE - if(root->isinternal()) - { - enumLeaves(root->childs[0],policy); - enumLeaves(root->childs[1],policy); - } - else - { - policy.Process(root); - } + if (root->isinternal()) + { + enumLeaves(root->childs[0], policy); + enumLeaves(root->childs[1], policy); + } + else + { + policy.Process(root); + } } // DBVT_PREFIX -inline void btDbvt::collideTT( const btDbvtNode* root0, - const btDbvtNode* root1, - DBVT_IPOLICY) +inline void btDbvt::collideTT(const btDbvtNode* root0, + const btDbvtNode* root1, + DBVT_IPOLICY) { DBVT_CHECKTYPE - if(root0&&root1) + if (root0 && root1) + { + int depth = 1; + int treshold = DOUBLE_STACKSIZE - 4; + btAlignedObjectArray<sStkNN> stkStack; + stkStack.resize(DOUBLE_STACKSIZE); + stkStack[0] = sStkNN(root0, root1); + do { - int depth=1; - int treshold=DOUBLE_STACKSIZE-4; - btAlignedObjectArray<sStkNN> stkStack; - stkStack.resize(DOUBLE_STACKSIZE); - stkStack[0]=sStkNN(root0,root1); - do { - sStkNN p=stkStack[--depth]; - if(depth>treshold) + sStkNN p = stkStack[--depth]; + if (depth > treshold) + { + stkStack.resize(stkStack.size() * 2); + treshold = stkStack.size() - 4; + } + if (p.a == p.b) + { + if (p.a->isinternal()) { - 
stkStack.resize(stkStack.size()*2); - treshold=stkStack.size()-4; + stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[0]); + stkStack[depth++] = sStkNN(p.a->childs[1], p.a->childs[1]); + stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[1]); } - if(p.a==p.b) + } + else if (Intersect(p.a->volume, p.b->volume)) + { + if (p.a->isinternal()) { - if(p.a->isinternal()) + if (p.b->isinternal()) + { + stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[0]); + stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[0]); + stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[1]); + stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[1]); + } + else { - stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[0]); - stkStack[depth++]=sStkNN(p.a->childs[1],p.a->childs[1]); - stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[1]); + stkStack[depth++] = sStkNN(p.a->childs[0], p.b); + stkStack[depth++] = sStkNN(p.a->childs[1], p.b); } } - else if(Intersect(p.a->volume,p.b->volume)) + else { - if(p.a->isinternal()) + if (p.b->isinternal()) { - if(p.b->isinternal()) - { - stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[0]); - stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[0]); - stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[1]); - stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[1]); - } - else - { - stkStack[depth++]=sStkNN(p.a->childs[0],p.b); - stkStack[depth++]=sStkNN(p.a->childs[1],p.b); - } + stkStack[depth++] = sStkNN(p.a, p.b->childs[0]); + stkStack[depth++] = sStkNN(p.a, p.b->childs[1]); } else { - if(p.b->isinternal()) - { - stkStack[depth++]=sStkNN(p.a,p.b->childs[0]); - stkStack[depth++]=sStkNN(p.a,p.b->childs[1]); - } - else - { - policy.Process(p.a,p.b); - } + policy.Process(p.a, p.b); } } - } while(depth); - } + } + } while (depth); + } } - - DBVT_PREFIX -inline void btDbvt::collideTTpersistentStack( const btDbvtNode* root0, - const btDbvtNode* root1, - DBVT_IPOLICY) +inline void btDbvt::collideTTpersistentStack(const btDbvtNode* root0, + const btDbvtNode* root1, + DBVT_IPOLICY) { DBVT_CHECKTYPE - if(root0&&root1) + if (root0 && root1) + { + int depth = 1; + int treshold = DOUBLE_STACKSIZE - 4; + + m_stkStack.resize(DOUBLE_STACKSIZE); + m_stkStack[0] = sStkNN(root0, root1); + do { - int depth=1; - int treshold=DOUBLE_STACKSIZE-4; - - m_stkStack.resize(DOUBLE_STACKSIZE); - m_stkStack[0]=sStkNN(root0,root1); - do { - sStkNN p=m_stkStack[--depth]; - if(depth>treshold) + sStkNN p = m_stkStack[--depth]; + if (depth > treshold) + { + m_stkStack.resize(m_stkStack.size() * 2); + treshold = m_stkStack.size() - 4; + } + if (p.a == p.b) + { + if (p.a->isinternal()) { - m_stkStack.resize(m_stkStack.size()*2); - treshold=m_stkStack.size()-4; + m_stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[0]); + m_stkStack[depth++] = sStkNN(p.a->childs[1], p.a->childs[1]); + m_stkStack[depth++] = sStkNN(p.a->childs[0], p.a->childs[1]); } - if(p.a==p.b) + } + else if (Intersect(p.a->volume, p.b->volume)) + { + if (p.a->isinternal()) { - if(p.a->isinternal()) + if (p.b->isinternal()) { - m_stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[0]); - m_stkStack[depth++]=sStkNN(p.a->childs[1],p.a->childs[1]); - m_stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[1]); + m_stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[0]); + m_stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[0]); + m_stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[1]); + m_stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[1]); + } + else + { + m_stkStack[depth++] = sStkNN(p.a->childs[0], 
p.b); + m_stkStack[depth++] = sStkNN(p.a->childs[1], p.b); } } - else if(Intersect(p.a->volume,p.b->volume)) + else { - if(p.a->isinternal()) + if (p.b->isinternal()) { - if(p.b->isinternal()) - { - m_stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[0]); - m_stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[0]); - m_stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[1]); - m_stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[1]); - } - else - { - m_stkStack[depth++]=sStkNN(p.a->childs[0],p.b); - m_stkStack[depth++]=sStkNN(p.a->childs[1],p.b); - } + m_stkStack[depth++] = sStkNN(p.a, p.b->childs[0]); + m_stkStack[depth++] = sStkNN(p.a, p.b->childs[1]); } else { - if(p.b->isinternal()) - { - m_stkStack[depth++]=sStkNN(p.a,p.b->childs[0]); - m_stkStack[depth++]=sStkNN(p.a,p.b->childs[1]); - } - else - { - policy.Process(p.a,p.b); - } + policy.Process(p.a, p.b); } } - } while(depth); - } + } + } while (depth); + } } #if 0 @@ -929,33 +968,35 @@ inline void btDbvt::collideTT( const btDbvtNode* root0, const btTransform xform=xform0.inverse()*xform1; collideTT(root0,root1,xform,policy); } -#endif +#endif DBVT_PREFIX -inline void btDbvt::collideTV( const btDbvtNode* root, - const btDbvtVolume& vol, - DBVT_IPOLICY) const +inline void btDbvt::collideTV(const btDbvtNode* root, + const btDbvtVolume& vol, + DBVT_IPOLICY) const { DBVT_CHECKTYPE - if(root) + if (root) { - ATTRIBUTE_ALIGNED16(btDbvtVolume) volume(vol); - btAlignedObjectArray<const btDbvtNode*> stack; + ATTRIBUTE_ALIGNED16(btDbvtVolume) + volume(vol); + btAlignedObjectArray<const btDbvtNode*> stack; stack.resize(0); #ifndef BT_DISABLE_STACK_TEMP_MEMORY - char tempmemory[SIMPLE_STACKSIZE*sizeof(const btDbvtNode*)]; + char tempmemory[SIMPLE_STACKSIZE * sizeof(const btDbvtNode*)]; stack.initializeFromBuffer(tempmemory, 0, SIMPLE_STACKSIZE); #else stack.reserve(SIMPLE_STACKSIZE); -#endif //BT_DISABLE_STACK_TEMP_MEMORY +#endif //BT_DISABLE_STACK_TEMP_MEMORY stack.push_back(root); - do { - const btDbvtNode* n=stack[stack.size()-1]; + do + { + const btDbvtNode* n = stack[stack.size() - 1]; stack.pop_back(); - if(Intersect(n->volume,volume)) + if (Intersect(n->volume, volume)) { - if(n->isinternal()) + if (n->isinternal()) { stack.push_back(n->childs[0]); stack.push_back(n->childs[1]); @@ -965,30 +1006,32 @@ inline void btDbvt::collideTV( const btDbvtNode* root, policy.Process(n); } } - } while(stack.size()>0); + } while (stack.size() > 0); } } // DBVT_PREFIX -inline void btDbvt::collideTVNoStackAlloc( const btDbvtNode* root, - const btDbvtVolume& vol, - btNodeStack& stack, - DBVT_IPOLICY) const +inline void btDbvt::collideTVNoStackAlloc(const btDbvtNode* root, + const btDbvtVolume& vol, + btNodeStack& stack, + DBVT_IPOLICY) const { DBVT_CHECKTYPE - if(root) + if (root) { - ATTRIBUTE_ALIGNED16(btDbvtVolume) volume(vol); + ATTRIBUTE_ALIGNED16(btDbvtVolume) + volume(vol); stack.resize(0); stack.reserve(SIMPLE_STACKSIZE); stack.push_back(root); - do { - const btDbvtNode* n=stack[stack.size()-1]; + do + { + const btDbvtNode* n = stack[stack.size() - 1]; stack.pop_back(); - if(Intersect(n->volume,volume)) + if (Intersect(n->volume, volume)) { - if(n->isinternal()) + if (n->isinternal()) { stack.push_back(n->childs[0]); stack.push_back(n->childs[1]); @@ -998,328 +1041,346 @@ inline void btDbvt::collideTVNoStackAlloc( const btDbvtNode* root, policy.Process(n); } } - } while(stack.size()>0); + } while (stack.size() > 0); } } - DBVT_PREFIX -inline void btDbvt::rayTestInternal( const btDbvtNode* root, - const btVector3& rayFrom, - const btVector3& 
rayTo, - const btVector3& rayDirectionInverse, - unsigned int signs[3], - btScalar lambda_max, - const btVector3& aabbMin, - const btVector3& aabbMax, - btAlignedObjectArray<const btDbvtNode*>& stack, - DBVT_IPOLICY ) const +inline void btDbvt::rayTestInternal(const btDbvtNode* root, + const btVector3& rayFrom, + const btVector3& rayTo, + const btVector3& rayDirectionInverse, + unsigned int signs[3], + btScalar lambda_max, + const btVector3& aabbMin, + const btVector3& aabbMax, + btAlignedObjectArray<const btDbvtNode*>& stack, + DBVT_IPOLICY) const { - (void) rayTo; + (void)rayTo; DBVT_CHECKTYPE - if(root) + if (root) { btVector3 resultNormal; - int depth=1; - int treshold=DOUBLE_STACKSIZE-2; + int depth = 1; + int treshold = DOUBLE_STACKSIZE - 2; stack.resize(DOUBLE_STACKSIZE); - stack[0]=root; + stack[0] = root; btVector3 bounds[2]; - do + do { - const btDbvtNode* node=stack[--depth]; - bounds[0] = node->volume.Mins()-aabbMax; - bounds[1] = node->volume.Maxs()-aabbMin; - btScalar tmin=1.f,lambda_min=0.f; - unsigned int result1=false; - result1 = btRayAabb2(rayFrom,rayDirectionInverse,signs,bounds,tmin,lambda_min,lambda_max); - if(result1) + const btDbvtNode* node = stack[--depth]; + bounds[0] = node->volume.Mins() - aabbMax; + bounds[1] = node->volume.Maxs() - aabbMin; + btScalar tmin = 1.f, lambda_min = 0.f; + unsigned int result1 = false; + result1 = btRayAabb2(rayFrom, rayDirectionInverse, signs, bounds, tmin, lambda_min, lambda_max); + if (result1) { - if(node->isinternal()) + if (node->isinternal()) { - if(depth>treshold) + if (depth > treshold) { - stack.resize(stack.size()*2); - treshold=stack.size()-2; + stack.resize(stack.size() * 2); + treshold = stack.size() - 2; } - stack[depth++]=node->childs[0]; - stack[depth++]=node->childs[1]; + stack[depth++] = node->childs[0]; + stack[depth++] = node->childs[1]; } else { policy.Process(node); } } - } while(depth); + } while (depth); } } // DBVT_PREFIX -inline void btDbvt::rayTest( const btDbvtNode* root, - const btVector3& rayFrom, - const btVector3& rayTo, - DBVT_IPOLICY) +inline void btDbvt::rayTest(const btDbvtNode* root, + const btVector3& rayFrom, + const btVector3& rayTo, + DBVT_IPOLICY) { DBVT_CHECKTYPE - if(root) - { - btVector3 rayDir = (rayTo-rayFrom); - rayDir.normalize (); + if (root) + { + btVector3 rayDir = (rayTo - rayFrom); + rayDir.normalize(); - ///what about division by zero? --> just set rayDirection[i] to INF/BT_LARGE_FLOAT - btVector3 rayDirectionInverse; - rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0]; - rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1]; - rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2]; - unsigned int signs[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0}; + ///what about division by zero? --> just set rayDirection[i] to INF/BT_LARGE_FLOAT + btVector3 rayDirectionInverse; + rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0]; + rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1]; + rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? 
btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2]; + unsigned int signs[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0}; - btScalar lambda_max = rayDir.dot(rayTo-rayFrom); + btScalar lambda_max = rayDir.dot(rayTo - rayFrom); - btVector3 resultNormal; + btVector3 resultNormal; - btAlignedObjectArray<const btDbvtNode*> stack; + btAlignedObjectArray<const btDbvtNode*> stack; - int depth=1; - int treshold=DOUBLE_STACKSIZE-2; + int depth = 1; + int treshold = DOUBLE_STACKSIZE - 2; - char tempmemory[DOUBLE_STACKSIZE * sizeof(const btDbvtNode*)]; + char tempmemory[DOUBLE_STACKSIZE * sizeof(const btDbvtNode*)]; #ifndef BT_DISABLE_STACK_TEMP_MEMORY - stack.initializeFromBuffer(tempmemory, DOUBLE_STACKSIZE, DOUBLE_STACKSIZE); -#else//BT_DISABLE_STACK_TEMP_MEMORY - stack.resize(DOUBLE_STACKSIZE); -#endif //BT_DISABLE_STACK_TEMP_MEMORY - stack[0]=root; - btVector3 bounds[2]; - do { - const btDbvtNode* node=stack[--depth]; + stack.initializeFromBuffer(tempmemory, DOUBLE_STACKSIZE, DOUBLE_STACKSIZE); +#else //BT_DISABLE_STACK_TEMP_MEMORY + stack.resize(DOUBLE_STACKSIZE); +#endif //BT_DISABLE_STACK_TEMP_MEMORY + stack[0] = root; + btVector3 bounds[2]; + do + { + const btDbvtNode* node = stack[--depth]; + + bounds[0] = node->volume.Mins(); + bounds[1] = node->volume.Maxs(); - bounds[0] = node->volume.Mins(); - bounds[1] = node->volume.Maxs(); - - btScalar tmin=1.f,lambda_min=0.f; - unsigned int result1 = btRayAabb2(rayFrom,rayDirectionInverse,signs,bounds,tmin,lambda_min,lambda_max); + btScalar tmin = 1.f, lambda_min = 0.f; + unsigned int result1 = btRayAabb2(rayFrom, rayDirectionInverse, signs, bounds, tmin, lambda_min, lambda_max); #ifdef COMPARE_BTRAY_AABB2 - btScalar param=1.f; - bool result2 = btRayAabb(rayFrom,rayTo,node->volume.Mins(),node->volume.Maxs(),param,resultNormal); - btAssert(result1 == result2); -#endif //TEST_BTRAY_AABB2 + btScalar param = 1.f; + bool result2 = btRayAabb(rayFrom, rayTo, node->volume.Mins(), node->volume.Maxs(), param, resultNormal); + btAssert(result1 == result2); +#endif //TEST_BTRAY_AABB2 - if(result1) + if (result1) + { + if (node->isinternal()) { - if(node->isinternal()) + if (depth > treshold) { - if(depth>treshold) - { - stack.resize(stack.size()*2); - treshold=stack.size()-2; - } - stack[depth++]=node->childs[0]; - stack[depth++]=node->childs[1]; - } - else - { - policy.Process(node); + stack.resize(stack.size() * 2); + treshold = stack.size() - 2; } + stack[depth++] = node->childs[0]; + stack[depth++] = node->childs[1]; } - } while(depth); - - } + else + { + policy.Process(node); + } + } + } while (depth); + } } // DBVT_PREFIX -inline void btDbvt::collideKDOP(const btDbvtNode* root, - const btVector3* normals, - const btScalar* offsets, - int count, - DBVT_IPOLICY) +inline void btDbvt::collideKDOP(const btDbvtNode* root, + const btVector3* normals, + const btScalar* offsets, + int count, + DBVT_IPOLICY) { DBVT_CHECKTYPE - if(root) + if (root) + { + const int inside = (1 << count) - 1; + btAlignedObjectArray<sStkNP> stack; + int signs[sizeof(unsigned) * 8]; + btAssert(count < int(sizeof(signs) / sizeof(signs[0]))); + for (int i = 0; i < count; ++i) { - const int inside=(1<<count)-1; - btAlignedObjectArray<sStkNP> stack; - int signs[sizeof(unsigned)*8]; - btAssert(count<int (sizeof(signs)/sizeof(signs[0]))); - for(int i=0;i<count;++i) + signs[i] = ((normals[i].x() >= 0) ? 1 : 0) + + ((normals[i].y() >= 0) ? 2 : 0) + + ((normals[i].z() >= 0) ? 
4 : 0); + } + stack.reserve(SIMPLE_STACKSIZE); + stack.push_back(sStkNP(root, 0)); + do + { + sStkNP se = stack[stack.size() - 1]; + bool out = false; + stack.pop_back(); + for (int i = 0, j = 1; (!out) && (i < count); ++i, j <<= 1) { - signs[i]= ((normals[i].x()>=0)?1:0)+ - ((normals[i].y()>=0)?2:0)+ - ((normals[i].z()>=0)?4:0); - } - stack.reserve(SIMPLE_STACKSIZE); - stack.push_back(sStkNP(root,0)); - do { - sStkNP se=stack[stack.size()-1]; - bool out=false; - stack.pop_back(); - for(int i=0,j=1;(!out)&&(i<count);++i,j<<=1) + if (0 == (se.mask & j)) { - if(0==(se.mask&j)) + const int side = se.node->volume.Classify(normals[i], offsets[i], signs[i]); + switch (side) { - const int side=se.node->volume.Classify(normals[i],offsets[i],signs[i]); - switch(side) - { - case -1: out=true;break; - case +1: se.mask|=j;break; - } + case -1: + out = true; + break; + case +1: + se.mask |= j; + break; } } - if(!out) + } + if (!out) + { + if ((se.mask != inside) && (se.node->isinternal())) { - if((se.mask!=inside)&&(se.node->isinternal())) - { - stack.push_back(sStkNP(se.node->childs[0],se.mask)); - stack.push_back(sStkNP(se.node->childs[1],se.mask)); - } - else - { - if(policy.AllLeaves(se.node)) enumLeaves(se.node,policy); - } + stack.push_back(sStkNP(se.node->childs[0], se.mask)); + stack.push_back(sStkNP(se.node->childs[1], se.mask)); } - } while(stack.size()); - } + else + { + if (policy.AllLeaves(se.node)) enumLeaves(se.node, policy); + } + } + } while (stack.size()); + } } // DBVT_PREFIX -inline void btDbvt::collideOCL( const btDbvtNode* root, - const btVector3* normals, - const btScalar* offsets, - const btVector3& sortaxis, - int count, - DBVT_IPOLICY, - bool fsort) +inline void btDbvt::collideOCL(const btDbvtNode* root, + const btVector3* normals, + const btScalar* offsets, + const btVector3& sortaxis, + int count, + DBVT_IPOLICY, + bool fsort) { DBVT_CHECKTYPE - if(root) + if (root) + { + const unsigned srtsgns = (sortaxis[0] >= 0 ? 1 : 0) + + (sortaxis[1] >= 0 ? 2 : 0) + + (sortaxis[2] >= 0 ? 4 : 0); + const int inside = (1 << count) - 1; + btAlignedObjectArray<sStkNPS> stock; + btAlignedObjectArray<int> ifree; + btAlignedObjectArray<int> stack; + int signs[sizeof(unsigned) * 8]; + btAssert(count < int(sizeof(signs) / sizeof(signs[0]))); + for (int i = 0; i < count; ++i) { - const unsigned srtsgns=(sortaxis[0]>=0?1:0)+ - (sortaxis[1]>=0?2:0)+ - (sortaxis[2]>=0?4:0); - const int inside=(1<<count)-1; - btAlignedObjectArray<sStkNPS> stock; - btAlignedObjectArray<int> ifree; - btAlignedObjectArray<int> stack; - int signs[sizeof(unsigned)*8]; - btAssert(count<int (sizeof(signs)/sizeof(signs[0]))); - for(int i=0;i<count;++i) + signs[i] = ((normals[i].x() >= 0) ? 1 : 0) + + ((normals[i].y() >= 0) ? 2 : 0) + + ((normals[i].z() >= 0) ? 
4 : 0); + } + stock.reserve(SIMPLE_STACKSIZE); + stack.reserve(SIMPLE_STACKSIZE); + ifree.reserve(SIMPLE_STACKSIZE); + stack.push_back(allocate(ifree, stock, sStkNPS(root, 0, root->volume.ProjectMinimum(sortaxis, srtsgns)))); + do + { + const int id = stack[stack.size() - 1]; + sStkNPS se = stock[id]; + stack.pop_back(); + ifree.push_back(id); + if (se.mask != inside) { - signs[i]= ((normals[i].x()>=0)?1:0)+ - ((normals[i].y()>=0)?2:0)+ - ((normals[i].z()>=0)?4:0); - } - stock.reserve(SIMPLE_STACKSIZE); - stack.reserve(SIMPLE_STACKSIZE); - ifree.reserve(SIMPLE_STACKSIZE); - stack.push_back(allocate(ifree,stock,sStkNPS(root,0,root->volume.ProjectMinimum(sortaxis,srtsgns)))); - do { - const int id=stack[stack.size()-1]; - sStkNPS se=stock[id]; - stack.pop_back();ifree.push_back(id); - if(se.mask!=inside) + bool out = false; + for (int i = 0, j = 1; (!out) && (i < count); ++i, j <<= 1) { - bool out=false; - for(int i=0,j=1;(!out)&&(i<count);++i,j<<=1) + if (0 == (se.mask & j)) { - if(0==(se.mask&j)) + const int side = se.node->volume.Classify(normals[i], offsets[i], signs[i]); + switch (side) { - const int side=se.node->volume.Classify(normals[i],offsets[i],signs[i]); - switch(side) - { - case -1: out=true;break; - case +1: se.mask|=j;break; - } + case -1: + out = true; + break; + case +1: + se.mask |= j; + break; } } - if(out) continue; } - if(policy.Descent(se.node)) + if (out) continue; + } + if (policy.Descent(se.node)) + { + if (se.node->isinternal()) { - if(se.node->isinternal()) + const btDbvtNode* pns[] = {se.node->childs[0], se.node->childs[1]}; + sStkNPS nes[] = {sStkNPS(pns[0], se.mask, pns[0]->volume.ProjectMinimum(sortaxis, srtsgns)), + sStkNPS(pns[1], se.mask, pns[1]->volume.ProjectMinimum(sortaxis, srtsgns))}; + const int q = nes[0].value < nes[1].value ? 
1 : 0; + int j = stack.size(); + if (fsort && (j > 0)) { - const btDbvtNode* pns[]={ se.node->childs[0],se.node->childs[1]}; - sStkNPS nes[]={ sStkNPS(pns[0],se.mask,pns[0]->volume.ProjectMinimum(sortaxis,srtsgns)), - sStkNPS(pns[1],se.mask,pns[1]->volume.ProjectMinimum(sortaxis,srtsgns))}; - const int q=nes[0].value<nes[1].value?1:0; - int j=stack.size(); - if(fsort&&(j>0)) - { - /* Insert 0 */ - j=nearest(&stack[0],&stock[0],nes[q].value,0,stack.size()); - stack.push_back(0); - - //void * memmove ( void * destination, const void * source, size_t num ); - + /* Insert 0 */ + j = nearest(&stack[0], &stock[0], nes[q].value, 0, stack.size()); + stack.push_back(0); + + //void * memmove ( void * destination, const void * source, size_t num ); + #if DBVT_USE_MEMMOVE - { - int num_items_to_move = stack.size()-1-j; - if(num_items_to_move > 0) - memmove(&stack[j+1],&stack[j],sizeof(int)*num_items_to_move); - } + { + int num_items_to_move = stack.size() - 1 - j; + if (num_items_to_move > 0) + memmove(&stack[j + 1], &stack[j], sizeof(int) * num_items_to_move); + } #else - for(int k=stack.size()-1;k>j;--k) { - stack[k]=stack[k-1]; - } + for (int k = stack.size() - 1; k > j; --k) + { + stack[k] = stack[k - 1]; + } #endif - stack[j]=allocate(ifree,stock,nes[q]); - /* Insert 1 */ - j=nearest(&stack[0],&stock[0],nes[1-q].value,j,stack.size()); - stack.push_back(0); + stack[j] = allocate(ifree, stock, nes[q]); + /* Insert 1 */ + j = nearest(&stack[0], &stock[0], nes[1 - q].value, j, stack.size()); + stack.push_back(0); #if DBVT_USE_MEMMOVE - { - int num_items_to_move = stack.size()-1-j; - if(num_items_to_move > 0) - memmove(&stack[j+1],&stack[j],sizeof(int)*num_items_to_move); - } -#else - for(int k=stack.size()-1;k>j;--k) { - stack[k]=stack[k-1]; - } -#endif - stack[j]=allocate(ifree,stock,nes[1-q]); + { + int num_items_to_move = stack.size() - 1 - j; + if (num_items_to_move > 0) + memmove(&stack[j + 1], &stack[j], sizeof(int) * num_items_to_move); } - else +#else + for (int k = stack.size() - 1; k > j; --k) { - stack.push_back(allocate(ifree,stock,nes[q])); - stack.push_back(allocate(ifree,stock,nes[1-q])); + stack[k] = stack[k - 1]; } +#endif + stack[j] = allocate(ifree, stock, nes[1 - q]); } else { - policy.Process(se.node,se.value); + stack.push_back(allocate(ifree, stock, nes[q])); + stack.push_back(allocate(ifree, stock, nes[1 - q])); } } - } while(stack.size()); - } + else + { + policy.Process(se.node, se.value); + } + } + } while (stack.size()); + } } // DBVT_PREFIX -inline void btDbvt::collideTU( const btDbvtNode* root, - DBVT_IPOLICY) +inline void btDbvt::collideTU(const btDbvtNode* root, + DBVT_IPOLICY) { DBVT_CHECKTYPE - if(root) + if (root) + { + btAlignedObjectArray<const btDbvtNode*> stack; + stack.reserve(SIMPLE_STACKSIZE); + stack.push_back(root); + do { - btAlignedObjectArray<const btDbvtNode*> stack; - stack.reserve(SIMPLE_STACKSIZE); - stack.push_back(root); - do { - const btDbvtNode* n=stack[stack.size()-1]; - stack.pop_back(); - if(policy.Descent(n)) + const btDbvtNode* n = stack[stack.size() - 1]; + stack.pop_back(); + if (policy.Descent(n)) + { + if (n->isinternal()) { - if(n->isinternal()) - { stack.push_back(n->childs[0]);stack.push_back(n->childs[1]); } - else - { policy.Process(n); } + stack.push_back(n->childs[0]); + stack.push_back(n->childs[1]); } - } while(stack.size()>0); - } + else + { + policy.Process(n); + } + } + } while (stack.size() > 0); + } } // diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.cpp 
b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.cpp index 14cd1a31ea..7b39dbdc0f 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.cpp @@ -22,28 +22,27 @@ btScalar gDbvtMargin = btScalar(0.05); // Profiling // -#if DBVT_BP_PROFILE||DBVT_BP_ENABLE_BENCHMARK +#if DBVT_BP_PROFILE || DBVT_BP_ENABLE_BENCHMARK #include <stdio.h> #endif #if DBVT_BP_PROFILE -struct ProfileScope +struct ProfileScope { - __forceinline ProfileScope(btClock& clock,unsigned long& value) : - m_clock(&clock),m_value(&value),m_base(clock.getTimeMicroseconds()) + __forceinline ProfileScope(btClock& clock, unsigned long& value) : m_clock(&clock), m_value(&value), m_base(clock.getTimeMicroseconds()) { } __forceinline ~ProfileScope() { - (*m_value)+=m_clock->getTimeMicroseconds()-m_base; + (*m_value) += m_clock->getTimeMicroseconds() - m_base; } - btClock* m_clock; - unsigned long* m_value; - unsigned long m_base; + btClock* m_clock; + unsigned long* m_value; + unsigned long m_base; }; -#define SPC(_value_) ProfileScope spc_scope(m_clock,_value_) +#define SPC(_value_) ProfileScope spc_scope(m_clock, _value_) #else -#define SPC(_value_) +#define SPC(_value_) #endif // @@ -52,66 +51,75 @@ struct ProfileScope // template <typename T> -static inline void listappend(T* item,T*& list) +static inline void listappend(T* item, T*& list) { - item->links[0]=0; - item->links[1]=list; - if(list) list->links[0]=item; - list=item; + item->links[0] = 0; + item->links[1] = list; + if (list) list->links[0] = item; + list = item; } // template <typename T> -static inline void listremove(T* item,T*& list) +static inline void listremove(T* item, T*& list) { - if(item->links[0]) item->links[0]->links[1]=item->links[1]; else list=item->links[1]; - if(item->links[1]) item->links[1]->links[0]=item->links[0]; + if (item->links[0]) + item->links[0]->links[1] = item->links[1]; + else + list = item->links[1]; + if (item->links[1]) item->links[1]->links[0] = item->links[0]; } // template <typename T> -static inline int listcount(T* root) +static inline int listcount(T* root) { - int n=0; - while(root) { ++n;root=root->links[1]; } - return(n); + int n = 0; + while (root) + { + ++n; + root = root->links[1]; + } + return (n); } // template <typename T> -static inline void clear(T& value) +static inline void clear(T& value) { - static const struct ZeroDummy : T {} zerodummy; - value=zerodummy; + static const struct ZeroDummy : T + { + } zerodummy; + value = zerodummy; } // // Colliders // -/* Tree collider */ -struct btDbvtTreeCollider : btDbvt::ICollide +/* Tree collider */ +struct btDbvtTreeCollider : btDbvt::ICollide { - btDbvtBroadphase* pbp; - btDbvtProxy* proxy; + btDbvtBroadphase* pbp; + btDbvtProxy* proxy; btDbvtTreeCollider(btDbvtBroadphase* p) : pbp(p) {} - void Process(const btDbvtNode* na,const btDbvtNode* nb) + void Process(const btDbvtNode* na, const btDbvtNode* nb) { - if(na!=nb) + if (na != nb) { - btDbvtProxy* pa=(btDbvtProxy*)na->data; - btDbvtProxy* pb=(btDbvtProxy*)nb->data; + btDbvtProxy* pa = (btDbvtProxy*)na->data; + btDbvtProxy* pb = (btDbvtProxy*)nb->data; #if DBVT_BP_SORTPAIRS - if(pa->m_uniqueId>pb->m_uniqueId) - btSwap(pa,pb); + if (pa->m_uniqueId > pb->m_uniqueId) + btSwap(pa, pb); #endif - pbp->m_paircache->addOverlappingPair(pa,pb); + pbp->m_paircache->addOverlappingPair(pa, pb); ++pbp->m_newpairs; } } - void Process(const btDbvtNode* n) + void Process(const btDbvtNode* n) { - 
Process(n,proxy->leaf); + Process(n, proxy->leaf); } }; @@ -122,31 +130,31 @@ struct btDbvtTreeCollider : btDbvt::ICollide // btDbvtBroadphase::btDbvtBroadphase(btOverlappingPairCache* paircache) { - m_deferedcollide = false; - m_needcleanup = true; - m_releasepaircache = (paircache!=0)?false:true; - m_prediction = 0; - m_stageCurrent = 0; - m_fixedleft = 0; - m_fupdates = 1; - m_dupdates = 0; - m_cupdates = 10; - m_newpairs = 1; - m_updates_call = 0; - m_updates_done = 0; - m_updates_ratio = 0; - m_paircache = paircache? paircache : new(btAlignedAlloc(sizeof(btHashedOverlappingPairCache),16)) btHashedOverlappingPairCache(); - m_gid = 0; - m_pid = 0; - m_cid = 0; - for(int i=0;i<=STAGECOUNT;++i) + m_deferedcollide = false; + m_needcleanup = true; + m_releasepaircache = (paircache != 0) ? false : true; + m_prediction = 0; + m_stageCurrent = 0; + m_fixedleft = 0; + m_fupdates = 1; + m_dupdates = 0; + m_cupdates = 10; + m_newpairs = 1; + m_updates_call = 0; + m_updates_done = 0; + m_updates_ratio = 0; + m_paircache = paircache ? paircache : new (btAlignedAlloc(sizeof(btHashedOverlappingPairCache), 16)) btHashedOverlappingPairCache(); + m_gid = 0; + m_pid = 0; + m_cid = 0; + for (int i = 0; i <= STAGECOUNT; ++i) { - m_stageRoots[i]=0; + m_stageRoots[i] = 0; } #if BT_THREADSAFE - m_rayTestStacks.resize(BT_MAX_THREAD_COUNT); + m_rayTestStacks.resize(BT_MAX_THREAD_COUNT); #else - m_rayTestStacks.resize(1); + m_rayTestStacks.resize(1); #endif #if DBVT_BP_PROFILE clear(m_profiling); @@ -156,7 +164,7 @@ btDbvtBroadphase::btDbvtBroadphase(btOverlappingPairCache* paircache) // btDbvtBroadphase::~btDbvtBroadphase() { - if(m_releasepaircache) + if (m_releasepaircache) { m_paircache->~btOverlappingPairCache(); btAlignedFree(m_paircache); @@ -164,302 +172,294 @@ btDbvtBroadphase::~btDbvtBroadphase() } // -btBroadphaseProxy* btDbvtBroadphase::createProxy( const btVector3& aabbMin, - const btVector3& aabbMax, - int /*shapeType*/, - void* userPtr, - int collisionFilterGroup, - int collisionFilterMask, - btDispatcher* /*dispatcher*/) +btBroadphaseProxy* btDbvtBroadphase::createProxy(const btVector3& aabbMin, + const btVector3& aabbMax, + int /*shapeType*/, + void* userPtr, + int collisionFilterGroup, + int collisionFilterMask, + btDispatcher* /*dispatcher*/) { - btDbvtProxy* proxy=new(btAlignedAlloc(sizeof(btDbvtProxy),16)) btDbvtProxy( aabbMin,aabbMax,userPtr, - collisionFilterGroup, - collisionFilterMask); + btDbvtProxy* proxy = new (btAlignedAlloc(sizeof(btDbvtProxy), 16)) btDbvtProxy(aabbMin, aabbMax, userPtr, + collisionFilterGroup, + collisionFilterMask); - btDbvtAabbMm aabb = btDbvtVolume::FromMM(aabbMin,aabbMax); + btDbvtAabbMm aabb = btDbvtVolume::FromMM(aabbMin, aabbMax); //bproxy->aabb = btDbvtVolume::FromMM(aabbMin,aabbMax); - proxy->stage = m_stageCurrent; - proxy->m_uniqueId = ++m_gid; - proxy->leaf = m_sets[0].insert(aabb,proxy); - listappend(proxy,m_stageRoots[m_stageCurrent]); - if(!m_deferedcollide) + proxy->stage = m_stageCurrent; + proxy->m_uniqueId = ++m_gid; + proxy->leaf = m_sets[0].insert(aabb, proxy); + listappend(proxy, m_stageRoots[m_stageCurrent]); + if (!m_deferedcollide) { - btDbvtTreeCollider collider(this); - collider.proxy=proxy; - m_sets[0].collideTV(m_sets[0].m_root,aabb,collider); - m_sets[1].collideTV(m_sets[1].m_root,aabb,collider); + btDbvtTreeCollider collider(this); + collider.proxy = proxy; + m_sets[0].collideTV(m_sets[0].m_root, aabb, collider); + m_sets[1].collideTV(m_sets[1].m_root, aabb, collider); } - return(proxy); + return (proxy); } // -void 
btDbvtBroadphase::destroyProxy( btBroadphaseProxy* absproxy, - btDispatcher* dispatcher) +void btDbvtBroadphase::destroyProxy(btBroadphaseProxy* absproxy, + btDispatcher* dispatcher) { - btDbvtProxy* proxy=(btDbvtProxy*)absproxy; - if(proxy->stage==STAGECOUNT) + btDbvtProxy* proxy = (btDbvtProxy*)absproxy; + if (proxy->stage == STAGECOUNT) m_sets[1].remove(proxy->leaf); else m_sets[0].remove(proxy->leaf); - listremove(proxy,m_stageRoots[proxy->stage]); - m_paircache->removeOverlappingPairsContainingProxy(proxy,dispatcher); + listremove(proxy, m_stageRoots[proxy->stage]); + m_paircache->removeOverlappingPairsContainingProxy(proxy, dispatcher); btAlignedFree(proxy); - m_needcleanup=true; + m_needcleanup = true; } -void btDbvtBroadphase::getAabb(btBroadphaseProxy* absproxy,btVector3& aabbMin, btVector3& aabbMax ) const +void btDbvtBroadphase::getAabb(btBroadphaseProxy* absproxy, btVector3& aabbMin, btVector3& aabbMax) const { - btDbvtProxy* proxy=(btDbvtProxy*)absproxy; + btDbvtProxy* proxy = (btDbvtProxy*)absproxy; aabbMin = proxy->m_aabbMin; aabbMax = proxy->m_aabbMax; } -struct BroadphaseRayTester : btDbvt::ICollide +struct BroadphaseRayTester : btDbvt::ICollide { btBroadphaseRayCallback& m_rayCallback; BroadphaseRayTester(btBroadphaseRayCallback& orgCallback) - :m_rayCallback(orgCallback) + : m_rayCallback(orgCallback) { } - void Process(const btDbvtNode* leaf) + void Process(const btDbvtNode* leaf) { - btDbvtProxy* proxy=(btDbvtProxy*)leaf->data; + btDbvtProxy* proxy = (btDbvtProxy*)leaf->data; m_rayCallback.process(proxy); } -}; +}; -void btDbvtBroadphase::rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback,const btVector3& aabbMin,const btVector3& aabbMax) +void btDbvtBroadphase::rayTest(const btVector3& rayFrom, const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin, const btVector3& aabbMax) { BroadphaseRayTester callback(rayCallback); - btAlignedObjectArray<const btDbvtNode*>* stack = &m_rayTestStacks[0]; + btAlignedObjectArray<const btDbvtNode*>* stack = &m_rayTestStacks[0]; #if BT_THREADSAFE - // for this function to be threadsafe, each thread must have a separate copy - // of this stack. This could be thread-local static to avoid dynamic allocations, - // instead of just a local. - int threadIndex = btGetCurrentThreadIndex(); - btAlignedObjectArray<const btDbvtNode*> localStack; - if (threadIndex < m_rayTestStacks.size()) - { - // use per-thread preallocated stack if possible to avoid dynamic allocations - stack = &m_rayTestStacks[threadIndex]; - } - else - { - stack = &localStack; - } + // for this function to be threadsafe, each thread must have a separate copy + // of this stack. This could be thread-local static to avoid dynamic allocations, + // instead of just a local. 
+ int threadIndex = btGetCurrentThreadIndex(); + btAlignedObjectArray<const btDbvtNode*> localStack; + //todo(erwincoumans, "why do we get tsan issue here?") + if (0)//threadIndex < m_rayTestStacks.size()) + //if (threadIndex < m_rayTestStacks.size()) + { + // use per-thread preallocated stack if possible to avoid dynamic allocations + stack = &m_rayTestStacks[threadIndex]; + } + else + { + stack = &localStack; + } #endif - m_sets[0].rayTestInternal( m_sets[0].m_root, - rayFrom, - rayTo, - rayCallback.m_rayDirectionInverse, - rayCallback.m_signs, - rayCallback.m_lambda_max, - aabbMin, - aabbMax, - *stack, - callback); - - m_sets[1].rayTestInternal( m_sets[1].m_root, - rayFrom, - rayTo, - rayCallback.m_rayDirectionInverse, - rayCallback.m_signs, - rayCallback.m_lambda_max, - aabbMin, - aabbMax, - *stack, - callback); - + m_sets[0].rayTestInternal(m_sets[0].m_root, + rayFrom, + rayTo, + rayCallback.m_rayDirectionInverse, + rayCallback.m_signs, + rayCallback.m_lambda_max, + aabbMin, + aabbMax, + *stack, + callback); + + m_sets[1].rayTestInternal(m_sets[1].m_root, + rayFrom, + rayTo, + rayCallback.m_rayDirectionInverse, + rayCallback.m_signs, + rayCallback.m_lambda_max, + aabbMin, + aabbMax, + *stack, + callback); } - -struct BroadphaseAabbTester : btDbvt::ICollide +struct BroadphaseAabbTester : btDbvt::ICollide { btBroadphaseAabbCallback& m_aabbCallback; BroadphaseAabbTester(btBroadphaseAabbCallback& orgCallback) - :m_aabbCallback(orgCallback) + : m_aabbCallback(orgCallback) { } - void Process(const btDbvtNode* leaf) + void Process(const btDbvtNode* leaf) { - btDbvtProxy* proxy=(btDbvtProxy*)leaf->data; + btDbvtProxy* proxy = (btDbvtProxy*)leaf->data; m_aabbCallback.process(proxy); } -}; +}; -void btDbvtBroadphase::aabbTest(const btVector3& aabbMin,const btVector3& aabbMax,btBroadphaseAabbCallback& aabbCallback) +void btDbvtBroadphase::aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& aabbCallback) { BroadphaseAabbTester callback(aabbCallback); - const ATTRIBUTE_ALIGNED16(btDbvtVolume) bounds=btDbvtVolume::FromMM(aabbMin,aabbMax); - //process all children, that overlap with the given AABB bounds - m_sets[0].collideTV(m_sets[0].m_root,bounds,callback); - m_sets[1].collideTV(m_sets[1].m_root,bounds,callback); - + const ATTRIBUTE_ALIGNED16(btDbvtVolume) bounds = btDbvtVolume::FromMM(aabbMin, aabbMax); + //process all children, that overlap with the given AABB bounds + m_sets[0].collideTV(m_sets[0].m_root, bounds, callback); + m_sets[1].collideTV(m_sets[1].m_root, bounds, callback); } - - // -void btDbvtBroadphase::setAabb( btBroadphaseProxy* absproxy, - const btVector3& aabbMin, - const btVector3& aabbMax, - btDispatcher* /*dispatcher*/) +void btDbvtBroadphase::setAabb(btBroadphaseProxy* absproxy, + const btVector3& aabbMin, + const btVector3& aabbMax, + btDispatcher* /*dispatcher*/) { - btDbvtProxy* proxy=(btDbvtProxy*)absproxy; - ATTRIBUTE_ALIGNED16(btDbvtVolume) aabb=btDbvtVolume::FromMM(aabbMin,aabbMax); + btDbvtProxy* proxy = (btDbvtProxy*)absproxy; + ATTRIBUTE_ALIGNED16(btDbvtVolume) + aabb = btDbvtVolume::FromMM(aabbMin, aabbMax); #if DBVT_BP_PREVENTFALSEUPDATE - if(NotEqual(aabb,proxy->leaf->volume)) + if (NotEqual(aabb, proxy->leaf->volume)) #endif { - bool docollide=false; - if(proxy->stage==STAGECOUNT) - {/* fixed -> dynamic set */ + bool docollide = false; + if (proxy->stage == STAGECOUNT) + { /* fixed -> dynamic set */ m_sets[1].remove(proxy->leaf); - proxy->leaf=m_sets[0].insert(aabb,proxy); - docollide=true; + proxy->leaf = 
m_sets[0].insert(aabb, proxy); + docollide = true; } else - {/* dynamic set */ + { /* dynamic set */ ++m_updates_call; - if(Intersect(proxy->leaf->volume,aabb)) - {/* Moving */ - - const btVector3 delta=aabbMin-proxy->m_aabbMin; - btVector3 velocity(((proxy->m_aabbMax-proxy->m_aabbMin)/2)*m_prediction); - if(delta[0]<0) velocity[0]=-velocity[0]; - if(delta[1]<0) velocity[1]=-velocity[1]; - if(delta[2]<0) velocity[2]=-velocity[2]; + if (Intersect(proxy->leaf->volume, aabb)) + { /* Moving */ + + const btVector3 delta = aabbMin - proxy->m_aabbMin; + btVector3 velocity(((proxy->m_aabbMax - proxy->m_aabbMin) / 2) * m_prediction); + if (delta[0] < 0) velocity[0] = -velocity[0]; + if (delta[1] < 0) velocity[1] = -velocity[1]; + if (delta[2] < 0) velocity[2] = -velocity[2]; if ( m_sets[0].update(proxy->leaf, aabb, velocity, gDbvtMargin) - ) + ) { ++m_updates_done; - docollide=true; + docollide = true; } } else - {/* Teleporting */ - m_sets[0].update(proxy->leaf,aabb); + { /* Teleporting */ + m_sets[0].update(proxy->leaf, aabb); ++m_updates_done; - docollide=true; - } + docollide = true; + } } - listremove(proxy,m_stageRoots[proxy->stage]); + listremove(proxy, m_stageRoots[proxy->stage]); proxy->m_aabbMin = aabbMin; proxy->m_aabbMax = aabbMax; - proxy->stage = m_stageCurrent; - listappend(proxy,m_stageRoots[m_stageCurrent]); - if(docollide) + proxy->stage = m_stageCurrent; + listappend(proxy, m_stageRoots[m_stageCurrent]); + if (docollide) { - m_needcleanup=true; - if(!m_deferedcollide) + m_needcleanup = true; + if (!m_deferedcollide) { - btDbvtTreeCollider collider(this); - m_sets[1].collideTTpersistentStack(m_sets[1].m_root,proxy->leaf,collider); - m_sets[0].collideTTpersistentStack(m_sets[0].m_root,proxy->leaf,collider); + btDbvtTreeCollider collider(this); + m_sets[1].collideTTpersistentStack(m_sets[1].m_root, proxy->leaf, collider); + m_sets[0].collideTTpersistentStack(m_sets[0].m_root, proxy->leaf, collider); } - } + } } } - // -void btDbvtBroadphase::setAabbForceUpdate( btBroadphaseProxy* absproxy, - const btVector3& aabbMin, - const btVector3& aabbMax, - btDispatcher* /*dispatcher*/) +void btDbvtBroadphase::setAabbForceUpdate(btBroadphaseProxy* absproxy, + const btVector3& aabbMin, + const btVector3& aabbMax, + btDispatcher* /*dispatcher*/) { - btDbvtProxy* proxy=(btDbvtProxy*)absproxy; - ATTRIBUTE_ALIGNED16(btDbvtVolume) aabb=btDbvtVolume::FromMM(aabbMin,aabbMax); - bool docollide=false; - if(proxy->stage==STAGECOUNT) - {/* fixed -> dynamic set */ + btDbvtProxy* proxy = (btDbvtProxy*)absproxy; + ATTRIBUTE_ALIGNED16(btDbvtVolume) + aabb = btDbvtVolume::FromMM(aabbMin, aabbMax); + bool docollide = false; + if (proxy->stage == STAGECOUNT) + { /* fixed -> dynamic set */ m_sets[1].remove(proxy->leaf); - proxy->leaf=m_sets[0].insert(aabb,proxy); - docollide=true; + proxy->leaf = m_sets[0].insert(aabb, proxy); + docollide = true; } else - {/* dynamic set */ + { /* dynamic set */ ++m_updates_call; - /* Teleporting */ - m_sets[0].update(proxy->leaf,aabb); + /* Teleporting */ + m_sets[0].update(proxy->leaf, aabb); ++m_updates_done; - docollide=true; + docollide = true; } - listremove(proxy,m_stageRoots[proxy->stage]); + listremove(proxy, m_stageRoots[proxy->stage]); proxy->m_aabbMin = aabbMin; proxy->m_aabbMax = aabbMax; - proxy->stage = m_stageCurrent; - listappend(proxy,m_stageRoots[m_stageCurrent]); - if(docollide) + proxy->stage = m_stageCurrent; + listappend(proxy, m_stageRoots[m_stageCurrent]); + if (docollide) { - m_needcleanup=true; - if(!m_deferedcollide) + m_needcleanup = true; + if 
(!m_deferedcollide) { - btDbvtTreeCollider collider(this); - m_sets[1].collideTTpersistentStack(m_sets[1].m_root,proxy->leaf,collider); - m_sets[0].collideTTpersistentStack(m_sets[0].m_root,proxy->leaf,collider); + btDbvtTreeCollider collider(this); + m_sets[1].collideTTpersistentStack(m_sets[1].m_root, proxy->leaf, collider); + m_sets[0].collideTTpersistentStack(m_sets[0].m_root, proxy->leaf, collider); } - } + } } // -void btDbvtBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) +void btDbvtBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) { collide(dispatcher); #if DBVT_BP_PROFILE - if(0==(m_pid%DBVT_BP_PROFILING_RATE)) - { - printf("fixed(%u) dynamics(%u) pairs(%u)\r\n",m_sets[1].m_leaves,m_sets[0].m_leaves,m_paircache->getNumOverlappingPairs()); - unsigned int total=m_profiling.m_total; - if(total<=0) total=1; - printf("ddcollide: %u%% (%uus)\r\n",(50+m_profiling.m_ddcollide*100)/total,m_profiling.m_ddcollide/DBVT_BP_PROFILING_RATE); - printf("fdcollide: %u%% (%uus)\r\n",(50+m_profiling.m_fdcollide*100)/total,m_profiling.m_fdcollide/DBVT_BP_PROFILING_RATE); - printf("cleanup: %u%% (%uus)\r\n",(50+m_profiling.m_cleanup*100)/total,m_profiling.m_cleanup/DBVT_BP_PROFILING_RATE); - printf("total: %uus\r\n",total/DBVT_BP_PROFILING_RATE); - const unsigned long sum=m_profiling.m_ddcollide+ - m_profiling.m_fdcollide+ - m_profiling.m_cleanup; - printf("leaked: %u%% (%uus)\r\n",100-((50+sum*100)/total),(total-sum)/DBVT_BP_PROFILING_RATE); - printf("job counts: %u%%\r\n",(m_profiling.m_jobcount*100)/((m_sets[0].m_leaves+m_sets[1].m_leaves)*DBVT_BP_PROFILING_RATE)); + if (0 == (m_pid % DBVT_BP_PROFILING_RATE)) + { + printf("fixed(%u) dynamics(%u) pairs(%u)\r\n", m_sets[1].m_leaves, m_sets[0].m_leaves, m_paircache->getNumOverlappingPairs()); + unsigned int total = m_profiling.m_total; + if (total <= 0) total = 1; + printf("ddcollide: %u%% (%uus)\r\n", (50 + m_profiling.m_ddcollide * 100) / total, m_profiling.m_ddcollide / DBVT_BP_PROFILING_RATE); + printf("fdcollide: %u%% (%uus)\r\n", (50 + m_profiling.m_fdcollide * 100) / total, m_profiling.m_fdcollide / DBVT_BP_PROFILING_RATE); + printf("cleanup: %u%% (%uus)\r\n", (50 + m_profiling.m_cleanup * 100) / total, m_profiling.m_cleanup / DBVT_BP_PROFILING_RATE); + printf("total: %uus\r\n", total / DBVT_BP_PROFILING_RATE); + const unsigned long sum = m_profiling.m_ddcollide + + m_profiling.m_fdcollide + + m_profiling.m_cleanup; + printf("leaked: %u%% (%uus)\r\n", 100 - ((50 + sum * 100) / total), (total - sum) / DBVT_BP_PROFILING_RATE); + printf("job counts: %u%%\r\n", (m_profiling.m_jobcount * 100) / ((m_sets[0].m_leaves + m_sets[1].m_leaves) * DBVT_BP_PROFILING_RATE)); clear(m_profiling); m_clock.reset(); } #endif performDeferredRemoval(dispatcher); - } void btDbvtBroadphase::performDeferredRemoval(btDispatcher* dispatcher) { - if (m_paircache->hasDeferredRemoval()) { - - btBroadphasePairArray& overlappingPairArray = m_paircache->getOverlappingPairArray(); + btBroadphasePairArray& overlappingPairArray = m_paircache->getOverlappingPairArray(); //perform a sort, to find duplicates and to sort 'invalid' pairs to the end overlappingPairArray.quickSort(btBroadphasePairSortPredicate()); int invalidPair = 0; - int i; btBroadphasePair previousPair; previousPair.m_pProxy0 = 0; previousPair.m_pProxy1 = 0; previousPair.m_algorithm = 0; - - - for (i=0;i<overlappingPairArray.size();i++) + + for (i = 0; i < overlappingPairArray.size(); i++) { - btBroadphasePair& pair = overlappingPairArray[i]; bool isDuplicate = (pair == previousPair); @@ 
-471,34 +471,35 @@ void btDbvtBroadphase::performDeferredRemoval(btDispatcher* dispatcher) if (!isDuplicate) { //important to perform AABB check that is consistent with the broadphase - btDbvtProxy* pa=(btDbvtProxy*)pair.m_pProxy0; - btDbvtProxy* pb=(btDbvtProxy*)pair.m_pProxy1; - bool hasOverlap = Intersect(pa->leaf->volume,pb->leaf->volume); + btDbvtProxy* pa = (btDbvtProxy*)pair.m_pProxy0; + btDbvtProxy* pb = (btDbvtProxy*)pair.m_pProxy1; + bool hasOverlap = Intersect(pa->leaf->volume, pb->leaf->volume); if (hasOverlap) { needsRemoval = false; - } else + } + else { needsRemoval = true; } - } else + } + else { //remove duplicate needsRemoval = true; //should have no algorithm btAssert(!pair.m_algorithm); } - + if (needsRemoval) { - m_paircache->cleanOverlappingPair(pair,dispatcher); + m_paircache->cleanOverlappingPair(pair, dispatcher); pair.m_pProxy0 = 0; pair.m_pProxy1 = 0; invalidPair++; - } - + } } //perform a sort, to sort 'invalid' pairs to the end @@ -508,7 +509,7 @@ void btDbvtBroadphase::performDeferredRemoval(btDispatcher* dispatcher) } // -void btDbvtBroadphase::collide(btDispatcher* dispatcher) +void btDbvtBroadphase::collide(btDispatcher* dispatcher) { /*printf("---------------------------------------------------------\n"); printf("m_sets[0].m_leaves=%d\n",m_sets[0].m_leaves); @@ -525,295 +526,303 @@ void btDbvtBroadphase::collide(btDispatcher* dispatcher) } */ - - SPC(m_profiling.m_total); - /* optimize */ - m_sets[0].optimizeIncremental(1+(m_sets[0].m_leaves*m_dupdates)/100); - if(m_fixedleft) + /* optimize */ + m_sets[0].optimizeIncremental(1 + (m_sets[0].m_leaves * m_dupdates) / 100); + if (m_fixedleft) { - const int count=1+(m_sets[1].m_leaves*m_fupdates)/100; - m_sets[1].optimizeIncremental(1+(m_sets[1].m_leaves*m_fupdates)/100); - m_fixedleft=btMax<int>(0,m_fixedleft-count); + const int count = 1 + (m_sets[1].m_leaves * m_fupdates) / 100; + m_sets[1].optimizeIncremental(1 + (m_sets[1].m_leaves * m_fupdates) / 100); + m_fixedleft = btMax<int>(0, m_fixedleft - count); } - /* dynamic -> fixed set */ - m_stageCurrent=(m_stageCurrent+1)%STAGECOUNT; - btDbvtProxy* current=m_stageRoots[m_stageCurrent]; - if(current) + /* dynamic -> fixed set */ + m_stageCurrent = (m_stageCurrent + 1) % STAGECOUNT; + btDbvtProxy* current = m_stageRoots[m_stageCurrent]; + if (current) { #if DBVT_BP_ACCURATESLEEPING - btDbvtTreeCollider collider(this); + btDbvtTreeCollider collider(this); #endif - do { - btDbvtProxy* next=current->links[1]; - listremove(current,m_stageRoots[current->stage]); - listappend(current,m_stageRoots[STAGECOUNT]); + do + { + btDbvtProxy* next = current->links[1]; + listremove(current, m_stageRoots[current->stage]); + listappend(current, m_stageRoots[STAGECOUNT]); #if DBVT_BP_ACCURATESLEEPING - m_paircache->removeOverlappingPairsContainingProxy(current,dispatcher); - collider.proxy=current; - btDbvt::collideTV(m_sets[0].m_root,current->aabb,collider); - btDbvt::collideTV(m_sets[1].m_root,current->aabb,collider); + m_paircache->removeOverlappingPairsContainingProxy(current, dispatcher); + collider.proxy = current; + btDbvt::collideTV(m_sets[0].m_root, current->aabb, collider); + btDbvt::collideTV(m_sets[1].m_root, current->aabb, collider); #endif m_sets[0].remove(current->leaf); - ATTRIBUTE_ALIGNED16(btDbvtVolume) curAabb=btDbvtVolume::FromMM(current->m_aabbMin,current->m_aabbMax); - current->leaf = m_sets[1].insert(curAabb,current); - current->stage = STAGECOUNT; - current = next; - } while(current); - m_fixedleft=m_sets[1].m_leaves; - m_needcleanup=true; + 
ATTRIBUTE_ALIGNED16(btDbvtVolume) + curAabb = btDbvtVolume::FromMM(current->m_aabbMin, current->m_aabbMax); + current->leaf = m_sets[1].insert(curAabb, current); + current->stage = STAGECOUNT; + current = next; + } while (current); + m_fixedleft = m_sets[1].m_leaves; + m_needcleanup = true; } - /* collide dynamics */ + /* collide dynamics */ { - btDbvtTreeCollider collider(this); - if(m_deferedcollide) + btDbvtTreeCollider collider(this); + if (m_deferedcollide) { SPC(m_profiling.m_fdcollide); - m_sets[0].collideTTpersistentStack(m_sets[0].m_root,m_sets[1].m_root,collider); + m_sets[0].collideTTpersistentStack(m_sets[0].m_root, m_sets[1].m_root, collider); } - if(m_deferedcollide) + if (m_deferedcollide) { SPC(m_profiling.m_ddcollide); - m_sets[0].collideTTpersistentStack(m_sets[0].m_root,m_sets[0].m_root,collider); + m_sets[0].collideTTpersistentStack(m_sets[0].m_root, m_sets[0].m_root, collider); } } - /* clean up */ - if(m_needcleanup) + /* clean up */ + if (m_needcleanup) { SPC(m_profiling.m_cleanup); - btBroadphasePairArray& pairs=m_paircache->getOverlappingPairArray(); - if(pairs.size()>0) + btBroadphasePairArray& pairs = m_paircache->getOverlappingPairArray(); + if (pairs.size() > 0) { - - int ni=btMin(pairs.size(),btMax<int>(m_newpairs,(pairs.size()*m_cupdates)/100)); - for(int i=0;i<ni;++i) + int ni = btMin(pairs.size(), btMax<int>(m_newpairs, (pairs.size() * m_cupdates) / 100)); + for (int i = 0; i < ni; ++i) { - btBroadphasePair& p=pairs[(m_cid+i)%pairs.size()]; - btDbvtProxy* pa=(btDbvtProxy*)p.m_pProxy0; - btDbvtProxy* pb=(btDbvtProxy*)p.m_pProxy1; - if(!Intersect(pa->leaf->volume,pb->leaf->volume)) + btBroadphasePair& p = pairs[(m_cid + i) % pairs.size()]; + btDbvtProxy* pa = (btDbvtProxy*)p.m_pProxy0; + btDbvtProxy* pb = (btDbvtProxy*)p.m_pProxy1; + if (!Intersect(pa->leaf->volume, pb->leaf->volume)) { #if DBVT_BP_SORTPAIRS - if(pa->m_uniqueId>pb->m_uniqueId) - btSwap(pa,pb); + if (pa->m_uniqueId > pb->m_uniqueId) + btSwap(pa, pb); #endif - m_paircache->removeOverlappingPair(pa,pb,dispatcher); - --ni;--i; + m_paircache->removeOverlappingPair(pa, pb, dispatcher); + --ni; + --i; } } - if(pairs.size()>0) m_cid=(m_cid+ni)%pairs.size(); else m_cid=0; + if (pairs.size() > 0) + m_cid = (m_cid + ni) % pairs.size(); + else + m_cid = 0; } } ++m_pid; - m_newpairs=1; - m_needcleanup=false; - if(m_updates_call>0) - { m_updates_ratio=m_updates_done/(btScalar)m_updates_call; } + m_newpairs = 1; + m_needcleanup = false; + if (m_updates_call > 0) + { + m_updates_ratio = m_updates_done / (btScalar)m_updates_call; + } else - { m_updates_ratio=0; } - m_updates_done/=2; - m_updates_call/=2; + { + m_updates_ratio = 0; + } + m_updates_done /= 2; + m_updates_call /= 2; } // -void btDbvtBroadphase::optimize() +void btDbvtBroadphase::optimize() { m_sets[0].optimizeTopDown(); m_sets[1].optimizeTopDown(); } // -btOverlappingPairCache* btDbvtBroadphase::getOverlappingPairCache() +btOverlappingPairCache* btDbvtBroadphase::getOverlappingPairCache() { - return(m_paircache); + return (m_paircache); } // -const btOverlappingPairCache* btDbvtBroadphase::getOverlappingPairCache() const +const btOverlappingPairCache* btDbvtBroadphase::getOverlappingPairCache() const { - return(m_paircache); + return (m_paircache); } // -void btDbvtBroadphase::getBroadphaseAabb(btVector3& aabbMin,btVector3& aabbMax) const +void btDbvtBroadphase::getBroadphaseAabb(btVector3& aabbMin, btVector3& aabbMax) const { + ATTRIBUTE_ALIGNED16(btDbvtVolume) + bounds; - ATTRIBUTE_ALIGNED16(btDbvtVolume) bounds; - - if(!m_sets[0].empty()) - 
if(!m_sets[1].empty()) Merge( m_sets[0].m_root->volume, - m_sets[1].m_root->volume,bounds); + if (!m_sets[0].empty()) + if (!m_sets[1].empty()) + Merge(m_sets[0].m_root->volume, + m_sets[1].m_root->volume, bounds); else - bounds=m_sets[0].m_root->volume; - else if(!m_sets[1].empty()) bounds=m_sets[1].m_root->volume; + bounds = m_sets[0].m_root->volume; + else if (!m_sets[1].empty()) + bounds = m_sets[1].m_root->volume; else - bounds=btDbvtVolume::FromCR(btVector3(0,0,0),0); - aabbMin=bounds.Mins(); - aabbMax=bounds.Maxs(); + bounds = btDbvtVolume::FromCR(btVector3(0, 0, 0), 0); + aabbMin = bounds.Mins(); + aabbMax = bounds.Maxs(); } void btDbvtBroadphase::resetPool(btDispatcher* dispatcher) { - int totalObjects = m_sets[0].m_leaves + m_sets[1].m_leaves; if (!totalObjects) { //reset internal dynamic tree data structures m_sets[0].clear(); m_sets[1].clear(); - - m_deferedcollide = false; - m_needcleanup = true; - m_stageCurrent = 0; - m_fixedleft = 0; - m_fupdates = 1; - m_dupdates = 0; - m_cupdates = 10; - m_newpairs = 1; - m_updates_call = 0; - m_updates_done = 0; - m_updates_ratio = 0; - - m_gid = 0; - m_pid = 0; - m_cid = 0; - for(int i=0;i<=STAGECOUNT;++i) + + m_deferedcollide = false; + m_needcleanup = true; + m_stageCurrent = 0; + m_fixedleft = 0; + m_fupdates = 1; + m_dupdates = 0; + m_cupdates = 10; + m_newpairs = 1; + m_updates_call = 0; + m_updates_done = 0; + m_updates_ratio = 0; + + m_gid = 0; + m_pid = 0; + m_cid = 0; + for (int i = 0; i <= STAGECOUNT; ++i) { - m_stageRoots[i]=0; + m_stageRoots[i] = 0; } } } // -void btDbvtBroadphase::printStats() -{} +void btDbvtBroadphase::printStats() +{ +} // #if DBVT_BP_ENABLE_BENCHMARK -struct btBroadphaseBenchmark +struct btBroadphaseBenchmark { - struct Experiment + struct Experiment { - const char* name; - int object_count; - int update_count; - int spawn_count; - int iterations; - btScalar speed; - btScalar amplitude; + const char* name; + int object_count; + int update_count; + int spawn_count; + int iterations; + btScalar speed; + btScalar amplitude; }; - struct Object + struct Object { - btVector3 center; - btVector3 extents; - btBroadphaseProxy* proxy; - btScalar time; - void update(btScalar speed,btScalar amplitude,btBroadphaseInterface* pbi) + btVector3 center; + btVector3 extents; + btBroadphaseProxy* proxy; + btScalar time; + void update(btScalar speed, btScalar amplitude, btBroadphaseInterface* pbi) { - time += speed; - center[0] = btCos(time*(btScalar)2.17)*amplitude+ - btSin(time)*amplitude/2; - center[1] = btCos(time*(btScalar)1.38)*amplitude+ - btSin(time)*amplitude; - center[2] = btSin(time*(btScalar)0.777)*amplitude; - pbi->setAabb(proxy,center-extents,center+extents,0); + time += speed; + center[0] = btCos(time * (btScalar)2.17) * amplitude + + btSin(time) * amplitude / 2; + center[1] = btCos(time * (btScalar)1.38) * amplitude + + btSin(time) * amplitude; + center[2] = btSin(time * (btScalar)0.777) * amplitude; + pbi->setAabb(proxy, center - extents, center + extents, 0); } }; - static int UnsignedRand(int range=RAND_MAX-1) { return(rand()%(range+1)); } - static btScalar UnitRand() { return(UnsignedRand(16384)/(btScalar)16384); } - static void OutputTime(const char* name,btClock& c,unsigned count=0) + static int UnsignedRand(int range = RAND_MAX - 1) { return (rand() % (range + 1)); } + static btScalar UnitRand() { return (UnsignedRand(16384) / (btScalar)16384); } + static void OutputTime(const char* name, btClock& c, unsigned count = 0) { - const unsigned long us=c.getTimeMicroseconds(); - const unsigned long 
ms=(us+500)/1000; - const btScalar sec=us/(btScalar)(1000*1000); - if(count>0) - printf("%s : %u us (%u ms), %.2f/s\r\n",name,us,ms,count/sec); + const unsigned long us = c.getTimeMicroseconds(); + const unsigned long ms = (us + 500) / 1000; + const btScalar sec = us / (btScalar)(1000 * 1000); + if (count > 0) + printf("%s : %u us (%u ms), %.2f/s\r\n", name, us, ms, count / sec); else - printf("%s : %u us (%u ms)\r\n",name,us,ms); + printf("%s : %u us (%u ms)\r\n", name, us, ms); } }; -void btDbvtBroadphase::benchmark(btBroadphaseInterface* pbi) +void btDbvtBroadphase::benchmark(btBroadphaseInterface* pbi) { - static const btBroadphaseBenchmark::Experiment experiments[]= - { - {"1024o.10%",1024,10,0,8192,(btScalar)0.005,(btScalar)100}, - /*{"4096o.10%",4096,10,0,8192,(btScalar)0.005,(btScalar)100}, + static const btBroadphaseBenchmark::Experiment experiments[] = + { + {"1024o.10%", 1024, 10, 0, 8192, (btScalar)0.005, (btScalar)100}, + /*{"4096o.10%",4096,10,0,8192,(btScalar)0.005,(btScalar)100}, {"8192o.10%",8192,10,0,8192,(btScalar)0.005,(btScalar)100},*/ - }; - static const int nexperiments=sizeof(experiments)/sizeof(experiments[0]); - btAlignedObjectArray<btBroadphaseBenchmark::Object*> objects; - btClock wallclock; - /* Begin */ - for(int iexp=0;iexp<nexperiments;++iexp) + }; + static const int nexperiments = sizeof(experiments) / sizeof(experiments[0]); + btAlignedObjectArray<btBroadphaseBenchmark::Object*> objects; + btClock wallclock; + /* Begin */ + for (int iexp = 0; iexp < nexperiments; ++iexp) { - const btBroadphaseBenchmark::Experiment& experiment=experiments[iexp]; - const int object_count=experiment.object_count; - const int update_count=(object_count*experiment.update_count)/100; - const int spawn_count=(object_count*experiment.spawn_count)/100; - const btScalar speed=experiment.speed; - const btScalar amplitude=experiment.amplitude; - printf("Experiment #%u '%s':\r\n",iexp,experiment.name); - printf("\tObjects: %u\r\n",object_count); - printf("\tUpdate: %u\r\n",update_count); - printf("\tSpawn: %u\r\n",spawn_count); - printf("\tSpeed: %f\r\n",speed); - printf("\tAmplitude: %f\r\n",amplitude); + const btBroadphaseBenchmark::Experiment& experiment = experiments[iexp]; + const int object_count = experiment.object_count; + const int update_count = (object_count * experiment.update_count) / 100; + const int spawn_count = (object_count * experiment.spawn_count) / 100; + const btScalar speed = experiment.speed; + const btScalar amplitude = experiment.amplitude; + printf("Experiment #%u '%s':\r\n", iexp, experiment.name); + printf("\tObjects: %u\r\n", object_count); + printf("\tUpdate: %u\r\n", update_count); + printf("\tSpawn: %u\r\n", spawn_count); + printf("\tSpeed: %f\r\n", speed); + printf("\tAmplitude: %f\r\n", amplitude); srand(180673); - /* Create objects */ + /* Create objects */ wallclock.reset(); objects.reserve(object_count); - for(int i=0;i<object_count;++i) + for (int i = 0; i < object_count; ++i) { - btBroadphaseBenchmark::Object* po=new btBroadphaseBenchmark::Object(); - po->center[0]=btBroadphaseBenchmark::UnitRand()*50; - po->center[1]=btBroadphaseBenchmark::UnitRand()*50; - po->center[2]=btBroadphaseBenchmark::UnitRand()*50; - po->extents[0]=btBroadphaseBenchmark::UnitRand()*2+2; - po->extents[1]=btBroadphaseBenchmark::UnitRand()*2+2; - po->extents[2]=btBroadphaseBenchmark::UnitRand()*2+2; - po->time=btBroadphaseBenchmark::UnitRand()*2000; - po->proxy=pbi->createProxy(po->center-po->extents,po->center+po->extents,0,po,1,1,0,0); + btBroadphaseBenchmark::Object* po 
= new btBroadphaseBenchmark::Object(); + po->center[0] = btBroadphaseBenchmark::UnitRand() * 50; + po->center[1] = btBroadphaseBenchmark::UnitRand() * 50; + po->center[2] = btBroadphaseBenchmark::UnitRand() * 50; + po->extents[0] = btBroadphaseBenchmark::UnitRand() * 2 + 2; + po->extents[1] = btBroadphaseBenchmark::UnitRand() * 2 + 2; + po->extents[2] = btBroadphaseBenchmark::UnitRand() * 2 + 2; + po->time = btBroadphaseBenchmark::UnitRand() * 2000; + po->proxy = pbi->createProxy(po->center - po->extents, po->center + po->extents, 0, po, 1, 1, 0, 0); objects.push_back(po); } - btBroadphaseBenchmark::OutputTime("\tInitialization",wallclock); - /* First update */ + btBroadphaseBenchmark::OutputTime("\tInitialization", wallclock); + /* First update */ wallclock.reset(); - for(int i=0;i<objects.size();++i) + for (int i = 0; i < objects.size(); ++i) { - objects[i]->update(speed,amplitude,pbi); + objects[i]->update(speed, amplitude, pbi); } - btBroadphaseBenchmark::OutputTime("\tFirst update",wallclock); - /* Updates */ + btBroadphaseBenchmark::OutputTime("\tFirst update", wallclock); + /* Updates */ wallclock.reset(); - for(int i=0;i<experiment.iterations;++i) + for (int i = 0; i < experiment.iterations; ++i) { - for(int j=0;j<update_count;++j) - { - objects[j]->update(speed,amplitude,pbi); + for (int j = 0; j < update_count; ++j) + { + objects[j]->update(speed, amplitude, pbi); } pbi->calculateOverlappingPairs(0); } - btBroadphaseBenchmark::OutputTime("\tUpdate",wallclock,experiment.iterations); - /* Clean up */ + btBroadphaseBenchmark::OutputTime("\tUpdate", wallclock, experiment.iterations); + /* Clean up */ wallclock.reset(); - for(int i=0;i<objects.size();++i) + for (int i = 0; i < objects.size(); ++i) { - pbi->destroyProxy(objects[i]->proxy,0); + pbi->destroyProxy(objects[i]->proxy, 0); delete objects[i]; } objects.resize(0); - btBroadphaseBenchmark::OutputTime("\tRelease",wallclock); + btBroadphaseBenchmark::OutputTime("\tRelease", wallclock); } - } #else -void btDbvtBroadphase::benchmark(btBroadphaseInterface*) -{} +void btDbvtBroadphase::benchmark(btBroadphaseInterface*) +{ +} #endif #if DBVT_BP_PROFILE -#undef SPC +#undef SPC #endif - diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.h index 90b333d846..a71feef53b 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDbvtBroadphase.h @@ -24,16 +24,16 @@ subject to the following restrictions: // Compile time config // -#define DBVT_BP_PROFILE 0 +#define DBVT_BP_PROFILE 0 //#define DBVT_BP_SORTPAIRS 1 -#define DBVT_BP_PREVENTFALSEUPDATE 0 -#define DBVT_BP_ACCURATESLEEPING 0 -#define DBVT_BP_ENABLE_BENCHMARK 0 +#define DBVT_BP_PREVENTFALSEUPDATE 0 +#define DBVT_BP_ACCURATESLEEPING 0 +#define DBVT_BP_ENABLE_BENCHMARK 0 //#define DBVT_BP_MARGIN (btScalar)0.05 extern btScalar gDbvtMargin; #if DBVT_BP_PROFILE -#define DBVT_BP_PROFILING_RATE 256 +#define DBVT_BP_PROFILING_RATE 256 #include "LinearMath/btQuickprof.h" #endif @@ -42,90 +42,90 @@ extern btScalar gDbvtMargin; // struct btDbvtProxy : btBroadphaseProxy { - /* Fields */ + /* Fields */ //btDbvtAabbMm aabb; - btDbvtNode* leaf; - btDbvtProxy* links[2]; - int stage; - /* ctor */ - btDbvtProxy(const btVector3& aabbMin,const btVector3& aabbMax,void* userPtr, int collisionFilterGroup, int collisionFilterMask) : - 
btBroadphaseProxy(aabbMin,aabbMax,userPtr,collisionFilterGroup,collisionFilterMask) + btDbvtNode* leaf; + btDbvtProxy* links[2]; + int stage; + /* ctor */ + btDbvtProxy(const btVector3& aabbMin, const btVector3& aabbMax, void* userPtr, int collisionFilterGroup, int collisionFilterMask) : btBroadphaseProxy(aabbMin, aabbMax, userPtr, collisionFilterGroup, collisionFilterMask) { - links[0]=links[1]=0; + links[0] = links[1] = 0; } }; -typedef btAlignedObjectArray<btDbvtProxy*> btDbvtProxyArray; +typedef btAlignedObjectArray<btDbvtProxy*> btDbvtProxyArray; ///The btDbvtBroadphase implements a broadphase using two dynamic AABB bounding volume hierarchies/trees (see btDbvt). ///One tree is used for static/non-moving objects, and another tree is used for dynamic objects. Objects can move from one tree to the other. ///This is a very fast broadphase, especially for very dynamic worlds where many objects are moving. Its insert/add and remove of objects is generally faster than the sweep and prune broadphases btAxisSweep3 and bt32BitAxisSweep3. -struct btDbvtBroadphase : btBroadphaseInterface +struct btDbvtBroadphase : btBroadphaseInterface { - /* Config */ - enum { - DYNAMIC_SET = 0, /* Dynamic set index */ - FIXED_SET = 1, /* Fixed set index */ - STAGECOUNT = 2 /* Number of stages */ + /* Config */ + enum + { + DYNAMIC_SET = 0, /* Dynamic set index */ + FIXED_SET = 1, /* Fixed set index */ + STAGECOUNT = 2 /* Number of stages */ }; - /* Fields */ - btDbvt m_sets[2]; // Dbvt sets - btDbvtProxy* m_stageRoots[STAGECOUNT+1]; // Stages list - btOverlappingPairCache* m_paircache; // Pair cache - btScalar m_prediction; // Velocity prediction - int m_stageCurrent; // Current stage - int m_fupdates; // % of fixed updates per frame - int m_dupdates; // % of dynamic updates per frame - int m_cupdates; // % of cleanup updates per frame - int m_newpairs; // Number of pairs created - int m_fixedleft; // Fixed optimization left - unsigned m_updates_call; // Number of updates call - unsigned m_updates_done; // Number of updates done - btScalar m_updates_ratio; // m_updates_done/m_updates_call - int m_pid; // Parse id - int m_cid; // Cleanup index - int m_gid; // Gen id - bool m_releasepaircache; // Release pair cache on delete - bool m_deferedcollide; // Defere dynamic/static collision to collide call - bool m_needcleanup; // Need to run cleanup? - btAlignedObjectArray< btAlignedObjectArray<const btDbvtNode*> > m_rayTestStacks; + /* Fields */ + btDbvt m_sets[2]; // Dbvt sets + btDbvtProxy* m_stageRoots[STAGECOUNT + 1]; // Stages list + btOverlappingPairCache* m_paircache; // Pair cache + btScalar m_prediction; // Velocity prediction + int m_stageCurrent; // Current stage + int m_fupdates; // % of fixed updates per frame + int m_dupdates; // % of dynamic updates per frame + int m_cupdates; // % of cleanup updates per frame + int m_newpairs; // Number of pairs created + int m_fixedleft; // Fixed optimization left + unsigned m_updates_call; // Number of updates call + unsigned m_updates_done; // Number of updates done + btScalar m_updates_ratio; // m_updates_done/m_updates_call + int m_pid; // Parse id + int m_cid; // Cleanup index + int m_gid; // Gen id + bool m_releasepaircache; // Release pair cache on delete + bool m_deferedcollide; // Defere dynamic/static collision to collide call + bool m_needcleanup; // Need to run cleanup? 
+ btAlignedObjectArray<btAlignedObjectArray<const btDbvtNode*> > m_rayTestStacks; #if DBVT_BP_PROFILE - btClock m_clock; - struct { - unsigned long m_total; - unsigned long m_ddcollide; - unsigned long m_fdcollide; - unsigned long m_cleanup; - unsigned long m_jobcount; - } m_profiling; + btClock m_clock; + struct + { + unsigned long m_total; + unsigned long m_ddcollide; + unsigned long m_fdcollide; + unsigned long m_cleanup; + unsigned long m_jobcount; + } m_profiling; #endif - /* Methods */ - btDbvtBroadphase(btOverlappingPairCache* paircache=0); + /* Methods */ + btDbvtBroadphase(btOverlappingPairCache* paircache = 0); ~btDbvtBroadphase(); - void collide(btDispatcher* dispatcher); - void optimize(); - - /* btBroadphaseInterface Implementation */ - btBroadphaseProxy* createProxy(const btVector3& aabbMin,const btVector3& aabbMax,int shapeType,void* userPtr, int collisionFilterGroup, int collisionFilterMask,btDispatcher* dispatcher); - virtual void destroyProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher); - virtual void setAabb(btBroadphaseProxy* proxy,const btVector3& aabbMin,const btVector3& aabbMax,btDispatcher* dispatcher); - virtual void rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin=btVector3(0,0,0), const btVector3& aabbMax = btVector3(0,0,0)); - virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback); - - virtual void getAabb(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const; - virtual void calculateOverlappingPairs(btDispatcher* dispatcher); - virtual btOverlappingPairCache* getOverlappingPairCache(); - virtual const btOverlappingPairCache* getOverlappingPairCache() const; - virtual void getBroadphaseAabb(btVector3& aabbMin,btVector3& aabbMax) const; - virtual void printStats(); + void collide(btDispatcher* dispatcher); + void optimize(); + /* btBroadphaseInterface Implementation */ + btBroadphaseProxy* createProxy(const btVector3& aabbMin, const btVector3& aabbMax, int shapeType, void* userPtr, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher); + virtual void destroyProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher); + virtual void setAabb(btBroadphaseProxy* proxy, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* dispatcher); + virtual void rayTest(const btVector3& rayFrom, const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin = btVector3(0, 0, 0), const btVector3& aabbMax = btVector3(0, 0, 0)); + virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback); + + virtual void getAabb(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const; + virtual void calculateOverlappingPairs(btDispatcher* dispatcher); + virtual btOverlappingPairCache* getOverlappingPairCache(); + virtual const btOverlappingPairCache* getOverlappingPairCache() const; + virtual void getBroadphaseAabb(btVector3& aabbMin, btVector3& aabbMax) const; + virtual void printStats(); ///reset broadphase internal structures, to ensure determinism/reproducability virtual void resetPool(btDispatcher* dispatcher); - void performDeferredRemoval(btDispatcher* dispatcher); - - void setVelocityPrediction(btScalar prediction) + void performDeferredRemoval(btDispatcher* dispatcher); + + void setVelocityPrediction(btScalar prediction) { m_prediction = prediction; } @@ -134,15 +134,13 @@ struct btDbvtBroadphase : 
btBroadphaseInterface return m_prediction; } - ///this setAabbForceUpdate is similar to setAabb but always forces the aabb update. + ///this setAabbForceUpdate is similar to setAabb but always forces the aabb update. ///it is not part of the btBroadphaseInterface but specific to btDbvtBroadphase. ///it bypasses certain optimizations that prevent aabb updates (when the aabb shrinks), see ///http://code.google.com/p/bullet/issues/detail?id=223 - void setAabbForceUpdate( btBroadphaseProxy* absproxy,const btVector3& aabbMin,const btVector3& aabbMax,btDispatcher* /*dispatcher*/); - - static void benchmark(btBroadphaseInterface*); - + void setAabbForceUpdate(btBroadphaseProxy* absproxy, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* /*dispatcher*/); + static void benchmark(btBroadphaseInterface*); }; #endif diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.cpp index 20768225b3..d76d408aa6 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.cpp @@ -17,6 +17,4 @@ subject to the following restrictions: btDispatcher::~btDispatcher() { - } - diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.h index a0e4c18927..b09b7d4d42 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btDispatcher.h @@ -20,7 +20,7 @@ subject to the following restrictions: class btCollisionAlgorithm; struct btBroadphaseProxy; class btRigidBody; -class btCollisionObject; +class btCollisionObject; class btOverlappingPairCache; struct btCollisionObjectWrapper; @@ -35,35 +35,34 @@ struct btDispatcherInfo DISPATCH_CONTINUOUS }; btDispatcherInfo() - :m_timeStep(btScalar(0.)), - m_stepCount(0), - m_dispatchFunc(DISPATCH_DISCRETE), - m_timeOfImpact(btScalar(1.)), - m_useContinuous(true), - m_debugDraw(0), - m_enableSatConvex(false), - m_enableSPU(true), - m_useEpa(true), - m_allowedCcdPenetration(btScalar(0.04)), - m_useConvexConservativeDistanceUtil(false), - m_convexConservativeDistanceThreshold(0.0f), - m_deterministicOverlappingPairs(false) + : m_timeStep(btScalar(0.)), + m_stepCount(0), + m_dispatchFunc(DISPATCH_DISCRETE), + m_timeOfImpact(btScalar(1.)), + m_useContinuous(true), + m_debugDraw(0), + m_enableSatConvex(false), + m_enableSPU(true), + m_useEpa(true), + m_allowedCcdPenetration(btScalar(0.04)), + m_useConvexConservativeDistanceUtil(false), + m_convexConservativeDistanceThreshold(0.0f), + m_deterministicOverlappingPairs(false) { - } - btScalar m_timeStep; - int m_stepCount; - int m_dispatchFunc; - mutable btScalar m_timeOfImpact; - bool m_useContinuous; - class btIDebugDraw* m_debugDraw; - bool m_enableSatConvex; - bool m_enableSPU; - bool m_useEpa; - btScalar m_allowedCcdPenetration; - bool m_useConvexConservativeDistanceUtil; - btScalar m_convexConservativeDistanceThreshold; - bool m_deterministicOverlappingPairs; + btScalar m_timeStep; + int m_stepCount; + int m_dispatchFunc; + mutable btScalar m_timeOfImpact; + bool m_useContinuous; + class btIDebugDraw* m_debugDraw; + bool m_enableSatConvex; + bool m_enableSPU; + bool m_useEpa; + btScalar m_allowedCcdPenetration; + bool m_useConvexConservativeDistanceUtil; + btScalar m_convexConservativeDistanceThreshold; + bool m_deterministicOverlappingPairs; }; enum 
ebtDispatcherQueryType @@ -76,40 +75,36 @@ enum ebtDispatcherQueryType ///For example for pairwise collision detection, calculating contact points stored in btPersistentManifold or user callbacks (game logic). class btDispatcher { - - public: - virtual ~btDispatcher() ; + virtual ~btDispatcher(); - virtual btCollisionAlgorithm* findAlgorithm(const btCollisionObjectWrapper* body0Wrap,const btCollisionObjectWrapper* body1Wrap,btPersistentManifold* sharedManifold, ebtDispatcherQueryType queryType) = 0; + virtual btCollisionAlgorithm* findAlgorithm(const btCollisionObjectWrapper* body0Wrap, const btCollisionObjectWrapper* body1Wrap, btPersistentManifold* sharedManifold, ebtDispatcherQueryType queryType) = 0; - virtual btPersistentManifold* getNewManifold(const btCollisionObject* b0,const btCollisionObject* b1)=0; + virtual btPersistentManifold* getNewManifold(const btCollisionObject* b0, const btCollisionObject* b1) = 0; - virtual void releaseManifold(btPersistentManifold* manifold)=0; + virtual void releaseManifold(btPersistentManifold* manifold) = 0; - virtual void clearManifold(btPersistentManifold* manifold)=0; + virtual void clearManifold(btPersistentManifold* manifold) = 0; - virtual bool needsCollision(const btCollisionObject* body0,const btCollisionObject* body1) = 0; + virtual bool needsCollision(const btCollisionObject* body0, const btCollisionObject* body1) = 0; - virtual bool needsResponse(const btCollisionObject* body0,const btCollisionObject* body1)=0; + virtual bool needsResponse(const btCollisionObject* body0, const btCollisionObject* body1) = 0; - virtual void dispatchAllCollisionPairs(btOverlappingPairCache* pairCache,const btDispatcherInfo& dispatchInfo,btDispatcher* dispatcher) =0; + virtual void dispatchAllCollisionPairs(btOverlappingPairCache* pairCache, const btDispatcherInfo& dispatchInfo, btDispatcher* dispatcher) = 0; virtual int getNumManifolds() const = 0; virtual btPersistentManifold* getManifoldByIndexInternal(int index) = 0; - virtual btPersistentManifold** getInternalManifoldPointer() = 0; + virtual btPersistentManifold** getInternalManifoldPointer() = 0; - virtual btPoolAllocator* getInternalManifoldPool() = 0; + virtual btPoolAllocator* getInternalManifoldPool() = 0; - virtual const btPoolAllocator* getInternalManifoldPool() const = 0; + virtual const btPoolAllocator* getInternalManifoldPool() const = 0; - virtual void* allocateCollisionAlgorithm(int size) = 0; - - virtual void freeCollisionAlgorithm(void* ptr) = 0; + virtual void* allocateCollisionAlgorithm(int size) = 0; + virtual void freeCollisionAlgorithm(void* ptr) = 0; }; - -#endif //BT_DISPATCHER_H +#endif //BT_DISPATCHER_H diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.cpp index 9e3337c5f6..8ce1087c9f 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.cpp @@ -13,8 +13,6 @@ subject to the following restrictions: 3. This notice may not be removed or altered from any source distribution. 
*/ - - #include "btOverlappingPairCache.h" #include "btDispatcher.h" @@ -23,121 +21,95 @@ subject to the following restrictions: #include <stdio.h> - - - - -btHashedOverlappingPairCache::btHashedOverlappingPairCache(): - m_overlapFilterCallback(0), - m_ghostPairCallback(0) +btHashedOverlappingPairCache::btHashedOverlappingPairCache() : m_overlapFilterCallback(0), + m_ghostPairCallback(0) { - int initialAllocatedSize= 2; + int initialAllocatedSize = 2; m_overlappingPairArray.reserve(initialAllocatedSize); growTables(); } - - - btHashedOverlappingPairCache::~btHashedOverlappingPairCache() { } - - -void btHashedOverlappingPairCache::cleanOverlappingPair(btBroadphasePair& pair,btDispatcher* dispatcher) +void btHashedOverlappingPairCache::cleanOverlappingPair(btBroadphasePair& pair, btDispatcher* dispatcher) { if (pair.m_algorithm && dispatcher) { { pair.m_algorithm->~btCollisionAlgorithm(); dispatcher->freeCollisionAlgorithm(pair.m_algorithm); - pair.m_algorithm=0; + pair.m_algorithm = 0; } } } - - - -void btHashedOverlappingPairCache::cleanProxyFromPairs(btBroadphaseProxy* proxy,btDispatcher* dispatcher) +void btHashedOverlappingPairCache::cleanProxyFromPairs(btBroadphaseProxy* proxy, btDispatcher* dispatcher) { - - class CleanPairCallback : public btOverlapCallback + class CleanPairCallback : public btOverlapCallback { btBroadphaseProxy* m_cleanProxy; - btOverlappingPairCache* m_pairCache; + btOverlappingPairCache* m_pairCache; btDispatcher* m_dispatcher; public: - CleanPairCallback(btBroadphaseProxy* cleanProxy,btOverlappingPairCache* pairCache,btDispatcher* dispatcher) - :m_cleanProxy(cleanProxy), - m_pairCache(pairCache), - m_dispatcher(dispatcher) + CleanPairCallback(btBroadphaseProxy* cleanProxy, btOverlappingPairCache* pairCache, btDispatcher* dispatcher) + : m_cleanProxy(cleanProxy), + m_pairCache(pairCache), + m_dispatcher(dispatcher) { } - virtual bool processOverlap(btBroadphasePair& pair) + virtual bool processOverlap(btBroadphasePair& pair) { if ((pair.m_pProxy0 == m_cleanProxy) || (pair.m_pProxy1 == m_cleanProxy)) { - m_pairCache->cleanOverlappingPair(pair,m_dispatcher); + m_pairCache->cleanOverlappingPair(pair, m_dispatcher); } return false; } - }; - CleanPairCallback cleanPairs(proxy,this,dispatcher); - - processAllOverlappingPairs(&cleanPairs,dispatcher); + CleanPairCallback cleanPairs(proxy, this, dispatcher); + processAllOverlappingPairs(&cleanPairs, dispatcher); } - - - -void btHashedOverlappingPairCache::removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher) +void btHashedOverlappingPairCache::removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher) { - - class RemovePairCallback : public btOverlapCallback + class RemovePairCallback : public btOverlapCallback { btBroadphaseProxy* m_obsoleteProxy; public: RemovePairCallback(btBroadphaseProxy* obsoleteProxy) - :m_obsoleteProxy(obsoleteProxy) + : m_obsoleteProxy(obsoleteProxy) { } - virtual bool processOverlap(btBroadphasePair& pair) + virtual bool processOverlap(btBroadphasePair& pair) { return ((pair.m_pProxy0 == m_obsoleteProxy) || - (pair.m_pProxy1 == m_obsoleteProxy)); + (pair.m_pProxy1 == m_obsoleteProxy)); } - }; - RemovePairCallback removeCallback(proxy); - processAllOverlappingPairs(&removeCallback,dispatcher); + processAllOverlappingPairs(&removeCallback, dispatcher); } - - - - btBroadphasePair* btHashedOverlappingPairCache::findPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) { - if(proxy0->m_uniqueId>proxy1->m_uniqueId) - 
btSwap(proxy0,proxy1); + if (proxy0->m_uniqueId > proxy1->m_uniqueId) + btSwap(proxy0, proxy1); int proxyId1 = proxy0->getUid(); int proxyId2 = proxy1->getUid(); /*if (proxyId1 > proxyId2) btSwap(proxyId1, proxyId2);*/ - int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity()-1)); + int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity() - 1)); if (hash >= m_hashTable.size()) { @@ -162,9 +134,8 @@ btBroadphasePair* btHashedOverlappingPairCache::findPair(btBroadphaseProxy* prox //#include <stdio.h> -void btHashedOverlappingPairCache::growTables() +void btHashedOverlappingPairCache::growTables() { - int newCapacity = m_overlappingPairArray.capacity(); if (m_hashTable.size() < newCapacity) @@ -175,10 +146,9 @@ void btHashedOverlappingPairCache::growTables() m_hashTable.resize(newCapacity); m_next.resize(newCapacity); - int i; - for (i= 0; i < newCapacity; ++i) + for (i = 0; i < newCapacity; ++i) { m_hashTable[i] = BT_NULL_PAIR; } @@ -187,35 +157,31 @@ void btHashedOverlappingPairCache::growTables() m_next[i] = BT_NULL_PAIR; } - for(i=0;i<curHashtableSize;i++) + for (i = 0; i < curHashtableSize; i++) { - const btBroadphasePair& pair = m_overlappingPairArray[i]; int proxyId1 = pair.m_pProxy0->getUid(); int proxyId2 = pair.m_pProxy1->getUid(); /*if (proxyId1 > proxyId2) btSwap(proxyId1, proxyId2);*/ - int hashValue = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1),static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity()-1)); // New hash value with new mask + int hashValue = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity() - 1)); // New hash value with new mask m_next[i] = m_hashTable[hashValue]; m_hashTable[hashValue] = i; } - - } } btBroadphasePair* btHashedOverlappingPairCache::internalAddPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) { - if(proxy0->m_uniqueId>proxy1->m_uniqueId) - btSwap(proxy0,proxy1); + if (proxy0->m_uniqueId > proxy1->m_uniqueId) + btSwap(proxy0, proxy1); int proxyId1 = proxy0->getUid(); int proxyId2 = proxy1->getUid(); /*if (proxyId1 > proxyId2) btSwap(proxyId1, proxyId2);*/ - int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1),static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity()-1)); // New hash value with new mask - + int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity() - 1)); // New hash value with new mask btBroadphasePair* pair = internalFindPair(proxy0, proxy1, hash); if (pair != NULL) @@ -237,7 +203,7 @@ btBroadphasePair* btHashedOverlappingPairCache::internalAddPair(btBroadphaseProx //this is where we add an actual pair, so also call the 'ghost' if (m_ghostPairCallback) - m_ghostPairCallback->addOverlappingPair(proxy0,proxy1); + m_ghostPairCallback->addOverlappingPair(proxy0, proxy1); int newCapacity = m_overlappingPairArray.capacity(); @@ -245,15 +211,14 @@ btBroadphasePair* btHashedOverlappingPairCache::internalAddPair(btBroadphaseProx { growTables(); //hash with new capacity - hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1),static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity()-1)); + hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned 
int>(proxyId2)) & (m_overlappingPairArray.capacity() - 1)); } - - pair = new (mem) btBroadphasePair(*proxy0,*proxy1); -// pair->m_pProxy0 = proxy0; -// pair->m_pProxy1 = proxy1; + + pair = new (mem) btBroadphasePair(*proxy0, *proxy1); + // pair->m_pProxy0 = proxy0; + // pair->m_pProxy1 = proxy1; pair->m_algorithm = 0; pair->m_internalTmpValue = 0; - m_next[count] = m_hashTable[hash]; m_hashTable[hash] = count; @@ -261,19 +226,17 @@ btBroadphasePair* btHashedOverlappingPairCache::internalAddPair(btBroadphaseProx return pair; } - - -void* btHashedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1,btDispatcher* dispatcher) +void* btHashedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1, btDispatcher* dispatcher) { - if(proxy0->m_uniqueId>proxy1->m_uniqueId) - btSwap(proxy0,proxy1); + if (proxy0->m_uniqueId > proxy1->m_uniqueId) + btSwap(proxy0, proxy1); int proxyId1 = proxy0->getUid(); int proxyId2 = proxy1->getUid(); /*if (proxyId1 > proxyId2) btSwap(proxyId1, proxyId2);*/ - int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1),static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity()-1)); + int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity() - 1)); btBroadphasePair* pair = internalFindPair(proxy0, proxy1, hash); if (pair == NULL) @@ -281,7 +244,7 @@ void* btHashedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* pro return 0; } - cleanOverlappingPair(*pair,dispatcher); + cleanOverlappingPair(*pair, dispatcher); void* userData = pair->m_internalInfo1; @@ -319,7 +282,7 @@ void* btHashedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* pro int lastPairIndex = m_overlappingPairArray.size() - 1; if (m_ghostPairCallback) - m_ghostPairCallback->removeOverlappingPair(proxy0, proxy1,dispatcher); + m_ghostPairCallback->removeOverlappingPair(proxy0, proxy1, dispatcher); // If the removed pair is the last pair, we are done. if (lastPairIndex == pairIndex) @@ -330,8 +293,8 @@ void* btHashedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* pro // Remove the last pair from the hash table. const btBroadphasePair* last = &m_overlappingPairArray[lastPairIndex]; - /* missing swap here too, Nat. */ - int lastHash = static_cast<int>(getHash(static_cast<unsigned int>(last->m_pProxy0->getUid()), static_cast<unsigned int>(last->m_pProxy1->getUid())) & (m_overlappingPairArray.capacity()-1)); + /* missing swap here too, Nat. 
*/ + int lastHash = static_cast<int>(getHash(static_cast<unsigned int>(last->m_pProxy0->getUid()), static_cast<unsigned int>(last->m_pProxy1->getUid())) & (m_overlappingPairArray.capacity() - 1)); index = m_hashTable[lastHash]; btAssert(index != BT_NULL_PAIR); @@ -366,20 +329,20 @@ void* btHashedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* pro } //#include <stdio.h> #include "LinearMath/btQuickprof.h" -void btHashedOverlappingPairCache::processAllOverlappingPairs(btOverlapCallback* callback,btDispatcher* dispatcher) +void btHashedOverlappingPairCache::processAllOverlappingPairs(btOverlapCallback* callback, btDispatcher* dispatcher) { BT_PROFILE("btHashedOverlappingPairCache::processAllOverlappingPairs"); int i; -// printf("m_overlappingPairArray.size()=%d\n",m_overlappingPairArray.size()); - for (i=0;i<m_overlappingPairArray.size();) + // printf("m_overlappingPairArray.size()=%d\n",m_overlappingPairArray.size()); + for (i = 0; i < m_overlappingPairArray.size();) { - btBroadphasePair* pair = &m_overlappingPairArray[i]; if (callback->processOverlap(*pair)) { - removeOverlappingPair(pair->m_pProxy0,pair->m_pProxy1,dispatcher); - } else + removeOverlappingPair(pair->m_pProxy0, pair->m_pProxy1, dispatcher); + } + else { i++; } @@ -388,83 +351,83 @@ void btHashedOverlappingPairCache::processAllOverlappingPairs(btOverlapCallback* struct MyPairIndex { - int m_orgIndex; - int m_uidA0; - int m_uidA1; + int m_orgIndex; + int m_uidA0; + int m_uidA1; }; class MyPairIndeSortPredicate { public: - - bool operator() ( const MyPairIndex& a, const MyPairIndex& b ) const - { - const int uidA0 = a.m_uidA0; - const int uidB0 = b.m_uidA0; - const int uidA1 = a.m_uidA1; - const int uidB1 = b.m_uidA1; - return uidA0 > uidB0 || (uidA0 == uidB0 && uidA1 > uidB1); - } + bool operator()(const MyPairIndex& a, const MyPairIndex& b) const + { + const int uidA0 = a.m_uidA0; + const int uidB0 = b.m_uidA0; + const int uidA1 = a.m_uidA1; + const int uidB1 = b.m_uidA1; + return uidA0 > uidB0 || (uidA0 == uidB0 && uidA1 > uidB1); + } }; -void btHashedOverlappingPairCache::processAllOverlappingPairs(btOverlapCallback* callback,btDispatcher* dispatcher, const struct btDispatcherInfo& dispatchInfo) +void btHashedOverlappingPairCache::processAllOverlappingPairs(btOverlapCallback* callback, btDispatcher* dispatcher, const struct btDispatcherInfo& dispatchInfo) { - if (dispatchInfo.m_deterministicOverlappingPairs) - { - btBroadphasePairArray& pa = getOverlappingPairArray(); - btAlignedObjectArray<MyPairIndex> indices; - { - BT_PROFILE("sortOverlappingPairs"); - indices.resize(pa.size()); - for (int i=0;i<indices.size();i++) - { - const btBroadphasePair& p = pa[i]; - const int uidA0 = p.m_pProxy0 ? p.m_pProxy0->m_uniqueId : -1; - const int uidA1 = p.m_pProxy1 ? 
p.m_pProxy1->m_uniqueId : -1; - - indices[i].m_uidA0 = uidA0; - indices[i].m_uidA1 = uidA1; - indices[i].m_orgIndex = i; - } - indices.quickSort(MyPairIndeSortPredicate()); - } - { - BT_PROFILE("btHashedOverlappingPairCache::processAllOverlappingPairs"); - int i; - for (i=0;i<indices.size();) - { - btBroadphasePair* pair = &pa[indices[i].m_orgIndex]; - if (callback->processOverlap(*pair)) - { - removeOverlappingPair(pair->m_pProxy0,pair->m_pProxy1,dispatcher); - } else - { - i++; - } - } - } - } else - { - processAllOverlappingPairs(callback, dispatcher); - } -} + if (dispatchInfo.m_deterministicOverlappingPairs) + { + btBroadphasePairArray& pa = getOverlappingPairArray(); + btAlignedObjectArray<MyPairIndex> indices; + { + BT_PROFILE("sortOverlappingPairs"); + indices.resize(pa.size()); + for (int i = 0; i < indices.size(); i++) + { + const btBroadphasePair& p = pa[i]; + const int uidA0 = p.m_pProxy0 ? p.m_pProxy0->m_uniqueId : -1; + const int uidA1 = p.m_pProxy1 ? p.m_pProxy1->m_uniqueId : -1; + indices[i].m_uidA0 = uidA0; + indices[i].m_uidA1 = uidA1; + indices[i].m_orgIndex = i; + } + indices.quickSort(MyPairIndeSortPredicate()); + } + { + BT_PROFILE("btHashedOverlappingPairCache::processAllOverlappingPairs"); + int i; + for (i = 0; i < indices.size();) + { + btBroadphasePair* pair = &pa[indices[i].m_orgIndex]; + if (callback->processOverlap(*pair)) + { + removeOverlappingPair(pair->m_pProxy0, pair->m_pProxy1, dispatcher); + } + else + { + i++; + } + } + } + } + else + { + processAllOverlappingPairs(callback, dispatcher); + } +} -void btHashedOverlappingPairCache::sortOverlappingPairs(btDispatcher* dispatcher) +void btHashedOverlappingPairCache::sortOverlappingPairs(btDispatcher* dispatcher) { ///need to keep hashmap in sync with pair address, so rebuild all btBroadphasePairArray tmpPairs; int i; - for (i=0;i<m_overlappingPairArray.size();i++) + for (i = 0; i < m_overlappingPairArray.size(); i++) { tmpPairs.push_back(m_overlappingPairArray[i]); } - for (i=0;i<tmpPairs.size();i++) + for (i = 0; i < tmpPairs.size(); i++) { - removeOverlappingPair(tmpPairs[i].m_pProxy0,tmpPairs[i].m_pProxy1,dispatcher); + removeOverlappingPair(tmpPairs[i].m_pProxy0, tmpPairs[i].m_pProxy1, dispatcher); } - + for (i = 0; i < m_next.size(); i++) { m_next[i] = BT_NULL_PAIR; @@ -472,31 +435,28 @@ void btHashedOverlappingPairCache::sortOverlappingPairs(btDispatcher* dispatcher tmpPairs.quickSort(btBroadphasePairSortPredicate()); - for (i=0;i<tmpPairs.size();i++) + for (i = 0; i < tmpPairs.size(); i++) { - addOverlappingPair(tmpPairs[i].m_pProxy0,tmpPairs[i].m_pProxy1); + addOverlappingPair(tmpPairs[i].m_pProxy0, tmpPairs[i].m_pProxy1); } - - } - -void* btSortedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1, btDispatcher* dispatcher ) +void* btSortedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1, btDispatcher* dispatcher) { if (!hasDeferredRemoval()) { - btBroadphasePair findPair(*proxy0,*proxy1); + btBroadphasePair findPair(*proxy0, *proxy1); int findIndex = m_overlappingPairArray.findLinearSearch(findPair); if (findIndex < m_overlappingPairArray.size()) { btBroadphasePair& pair = m_overlappingPairArray[findIndex]; void* userData = pair.m_internalInfo1; - cleanOverlappingPair(pair,dispatcher); + cleanOverlappingPair(pair, dispatcher); if (m_ghostPairCallback) - m_ghostPairCallback->removeOverlappingPair(proxy0, proxy1,dispatcher); - - m_overlappingPairArray.swap(findIndex,m_overlappingPairArray.capacity()-1); + 
m_ghostPairCallback->removeOverlappingPair(proxy0, proxy1, dispatcher); + + m_overlappingPairArray.swap(findIndex, m_overlappingPairArray.capacity() - 1); m_overlappingPairArray.pop_back(); return userData; } @@ -505,95 +465,73 @@ void* btSortedOverlappingPairCache::removeOverlappingPair(btBroadphaseProxy* pro return 0; } - - - - - - - -btBroadphasePair* btSortedOverlappingPairCache::addOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) +btBroadphasePair* btSortedOverlappingPairCache::addOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) { //don't add overlap with own btAssert(proxy0 != proxy1); - if (!needsBroadphaseCollision(proxy0,proxy1)) + if (!needsBroadphaseCollision(proxy0, proxy1)) return 0; - + void* mem = &m_overlappingPairArray.expandNonInitializing(); - btBroadphasePair* pair = new (mem) btBroadphasePair(*proxy0,*proxy1); + btBroadphasePair* pair = new (mem) btBroadphasePair(*proxy0, *proxy1); - if (m_ghostPairCallback) + if (m_ghostPairCallback) m_ghostPairCallback->addOverlappingPair(proxy0, proxy1); return pair; - } ///this findPair becomes really slow. Either sort the list to speedup the query, or ///use a different solution. It is mainly used for Removing overlapping pairs. Removal could be delayed. ///we could keep a linked list in each proxy, and store pair in one of the proxies (with lowest memory address) ///Also we can use a 2D bitmap, which can be useful for a future GPU implementation - btBroadphasePair* btSortedOverlappingPairCache::findPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) +btBroadphasePair* btSortedOverlappingPairCache::findPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) { - if (!needsBroadphaseCollision(proxy0,proxy1)) + if (!needsBroadphaseCollision(proxy0, proxy1)) return 0; - btBroadphasePair tmpPair(*proxy0,*proxy1); + btBroadphasePair tmpPair(*proxy0, *proxy1); int findIndex = m_overlappingPairArray.findLinearSearch(tmpPair); if (findIndex < m_overlappingPairArray.size()) { //btAssert(it != m_overlappingPairSet.end()); - btBroadphasePair* pair = &m_overlappingPairArray[findIndex]; + btBroadphasePair* pair = &m_overlappingPairArray[findIndex]; return pair; } return 0; } - - - - - - - - - //#include <stdio.h> -void btSortedOverlappingPairCache::processAllOverlappingPairs(btOverlapCallback* callback,btDispatcher* dispatcher) +void btSortedOverlappingPairCache::processAllOverlappingPairs(btOverlapCallback* callback, btDispatcher* dispatcher) { - int i; - for (i=0;i<m_overlappingPairArray.size();) + for (i = 0; i < m_overlappingPairArray.size();) { - btBroadphasePair* pair = &m_overlappingPairArray[i]; if (callback->processOverlap(*pair)) { - cleanOverlappingPair(*pair,dispatcher); + cleanOverlappingPair(*pair, dispatcher); pair->m_pProxy0 = 0; pair->m_pProxy1 = 0; - m_overlappingPairArray.swap(i,m_overlappingPairArray.size()-1); + m_overlappingPairArray.swap(i, m_overlappingPairArray.size() - 1); m_overlappingPairArray.pop_back(); - } else + } + else { i++; } } } - - - -btSortedOverlappingPairCache::btSortedOverlappingPairCache(): - m_blockedForChanges(false), - m_hasDeferredRemoval(true), - m_overlapFilterCallback(0), - m_ghostPairCallback(0) +btSortedOverlappingPairCache::btSortedOverlappingPairCache() : m_blockedForChanges(false), + m_hasDeferredRemoval(true), + m_overlapFilterCallback(0), + m_ghostPairCallback(0) { - int initialAllocatedSize= 2; + int initialAllocatedSize = 2; m_overlappingPairArray.reserve(initialAllocatedSize); } @@ -601,81 +539,73 @@ 
btSortedOverlappingPairCache::~btSortedOverlappingPairCache() { } -void btSortedOverlappingPairCache::cleanOverlappingPair(btBroadphasePair& pair,btDispatcher* dispatcher) +void btSortedOverlappingPairCache::cleanOverlappingPair(btBroadphasePair& pair, btDispatcher* dispatcher) { if (pair.m_algorithm) { { pair.m_algorithm->~btCollisionAlgorithm(); dispatcher->freeCollisionAlgorithm(pair.m_algorithm); - pair.m_algorithm=0; + pair.m_algorithm = 0; } } } - -void btSortedOverlappingPairCache::cleanProxyFromPairs(btBroadphaseProxy* proxy,btDispatcher* dispatcher) +void btSortedOverlappingPairCache::cleanProxyFromPairs(btBroadphaseProxy* proxy, btDispatcher* dispatcher) { - - class CleanPairCallback : public btOverlapCallback + class CleanPairCallback : public btOverlapCallback { btBroadphaseProxy* m_cleanProxy; - btOverlappingPairCache* m_pairCache; + btOverlappingPairCache* m_pairCache; btDispatcher* m_dispatcher; public: - CleanPairCallback(btBroadphaseProxy* cleanProxy,btOverlappingPairCache* pairCache,btDispatcher* dispatcher) - :m_cleanProxy(cleanProxy), - m_pairCache(pairCache), - m_dispatcher(dispatcher) + CleanPairCallback(btBroadphaseProxy* cleanProxy, btOverlappingPairCache* pairCache, btDispatcher* dispatcher) + : m_cleanProxy(cleanProxy), + m_pairCache(pairCache), + m_dispatcher(dispatcher) { } - virtual bool processOverlap(btBroadphasePair& pair) + virtual bool processOverlap(btBroadphasePair& pair) { if ((pair.m_pProxy0 == m_cleanProxy) || (pair.m_pProxy1 == m_cleanProxy)) { - m_pairCache->cleanOverlappingPair(pair,m_dispatcher); + m_pairCache->cleanOverlappingPair(pair, m_dispatcher); } return false; } - }; - CleanPairCallback cleanPairs(proxy,this,dispatcher); - - processAllOverlappingPairs(&cleanPairs,dispatcher); + CleanPairCallback cleanPairs(proxy, this, dispatcher); + processAllOverlappingPairs(&cleanPairs, dispatcher); } - -void btSortedOverlappingPairCache::removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher) +void btSortedOverlappingPairCache::removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher) { - - class RemovePairCallback : public btOverlapCallback + class RemovePairCallback : public btOverlapCallback { btBroadphaseProxy* m_obsoleteProxy; public: RemovePairCallback(btBroadphaseProxy* obsoleteProxy) - :m_obsoleteProxy(obsoleteProxy) + : m_obsoleteProxy(obsoleteProxy) { } - virtual bool processOverlap(btBroadphasePair& pair) + virtual bool processOverlap(btBroadphasePair& pair) { return ((pair.m_pProxy0 == m_obsoleteProxy) || - (pair.m_pProxy1 == m_obsoleteProxy)); + (pair.m_pProxy1 == m_obsoleteProxy)); } - }; RemovePairCallback removeCallback(proxy); - processAllOverlappingPairs(&removeCallback,dispatcher); + processAllOverlappingPairs(&removeCallback, dispatcher); } -void btSortedOverlappingPairCache::sortOverlappingPairs(btDispatcher* dispatcher) +void btSortedOverlappingPairCache::sortOverlappingPairs(btDispatcher* dispatcher) { //should already be sorted } - diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.h index 7a38d34f05..a85782bc8a 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCache.h @@ -16,7 +16,6 @@ subject to the following restrictions: #ifndef BT_OVERLAPPING_PAIR_CACHE_H #define BT_OVERLAPPING_PAIR_CACHE_H - #include 
"btBroadphaseInterface.h" #include "btBroadphaseProxy.h" #include "btOverlappingPairCallback.h" @@ -24,177 +23,163 @@ subject to the following restrictions: #include "LinearMath/btAlignedObjectArray.h" class btDispatcher; -typedef btAlignedObjectArray<btBroadphasePair> btBroadphasePairArray; +typedef btAlignedObjectArray<btBroadphasePair> btBroadphasePairArray; -struct btOverlapCallback +struct btOverlapCallback { virtual ~btOverlapCallback() - {} + { + } //return true for deletion of the pair - virtual bool processOverlap(btBroadphasePair& pair) = 0; - + virtual bool processOverlap(btBroadphasePair& pair) = 0; }; struct btOverlapFilterCallback { virtual ~btOverlapFilterCallback() - {} + { + } // return true when pairs need collision - virtual bool needBroadphaseCollision(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) const = 0; + virtual bool needBroadphaseCollision(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) const = 0; }; - - - - - - -const int BT_NULL_PAIR=0xffffffff; +const int BT_NULL_PAIR = 0xffffffff; ///The btOverlappingPairCache provides an interface for overlapping pair management (add, remove, storage), used by the btBroadphaseInterface broadphases. ///The btHashedOverlappingPairCache and btSortedOverlappingPairCache classes are two implementations. class btOverlappingPairCache : public btOverlappingPairCallback { public: - virtual ~btOverlappingPairCache() {} // this is needed so we can get to the derived class destructor + virtual ~btOverlappingPairCache() {} // this is needed so we can get to the derived class destructor + + virtual btBroadphasePair* getOverlappingPairArrayPtr() = 0; - virtual btBroadphasePair* getOverlappingPairArrayPtr() = 0; - - virtual const btBroadphasePair* getOverlappingPairArrayPtr() const = 0; + virtual const btBroadphasePair* getOverlappingPairArrayPtr() const = 0; - virtual btBroadphasePairArray& getOverlappingPairArray() = 0; + virtual btBroadphasePairArray& getOverlappingPairArray() = 0; - virtual void cleanOverlappingPair(btBroadphasePair& pair,btDispatcher* dispatcher) = 0; + virtual void cleanOverlappingPair(btBroadphasePair& pair, btDispatcher* dispatcher) = 0; virtual int getNumOverlappingPairs() const = 0; - virtual void cleanProxyFromPairs(btBroadphaseProxy* proxy,btDispatcher* dispatcher) = 0; + virtual void cleanProxyFromPairs(btBroadphaseProxy* proxy, btDispatcher* dispatcher) = 0; - virtual void setOverlapFilterCallback(btOverlapFilterCallback* callback) = 0; + virtual void setOverlapFilterCallback(btOverlapFilterCallback* callback) = 0; - virtual void processAllOverlappingPairs(btOverlapCallback*,btDispatcher* dispatcher) = 0; + virtual void processAllOverlappingPairs(btOverlapCallback*, btDispatcher* dispatcher) = 0; - virtual void processAllOverlappingPairs(btOverlapCallback* callback,btDispatcher* dispatcher, const struct btDispatcherInfo& dispatchInfo) + virtual void processAllOverlappingPairs(btOverlapCallback* callback, btDispatcher* dispatcher, const struct btDispatcherInfo& dispatchInfo) { processAllOverlappingPairs(callback, dispatcher); } virtual btBroadphasePair* findPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) = 0; - virtual bool hasDeferredRemoval() = 0; - - virtual void setInternalGhostPairCallback(btOverlappingPairCallback* ghostPairCallback)=0; - - virtual void sortOverlappingPairs(btDispatcher* dispatcher) = 0; + virtual bool hasDeferredRemoval() = 0; + virtual void setInternalGhostPairCallback(btOverlappingPairCallback* ghostPairCallback) = 0; + virtual void 
sortOverlappingPairs(btDispatcher* dispatcher) = 0; }; /// Hash-space based Pair Cache, thanks to Erin Catto, Box2D, http://www.box2d.org, and Pierre Terdiman, Codercorner, http://codercorner.com -ATTRIBUTE_ALIGNED16(class) btHashedOverlappingPairCache : public btOverlappingPairCache +ATTRIBUTE_ALIGNED16(class) +btHashedOverlappingPairCache : public btOverlappingPairCache { - btBroadphasePairArray m_overlappingPairArray; + btBroadphasePairArray m_overlappingPairArray; btOverlapFilterCallback* m_overlapFilterCallback; protected: - - btAlignedObjectArray<int> m_hashTable; - btAlignedObjectArray<int> m_next; - btOverlappingPairCallback* m_ghostPairCallback; - + btAlignedObjectArray<int> m_hashTable; + btAlignedObjectArray<int> m_next; + btOverlappingPairCallback* m_ghostPairCallback; public: BT_DECLARE_ALIGNED_ALLOCATOR(); - + btHashedOverlappingPairCache(); virtual ~btHashedOverlappingPairCache(); - - void removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher); + void removeOverlappingPairsContainingProxy(btBroadphaseProxy * proxy, btDispatcher * dispatcher); + + virtual void* removeOverlappingPair(btBroadphaseProxy * proxy0, btBroadphaseProxy * proxy1, btDispatcher * dispatcher); - virtual void* removeOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1,btDispatcher* dispatcher); - - SIMD_FORCE_INLINE bool needsBroadphaseCollision(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) const + SIMD_FORCE_INLINE bool needsBroadphaseCollision(btBroadphaseProxy * proxy0, btBroadphaseProxy * proxy1) const { if (m_overlapFilterCallback) - return m_overlapFilterCallback->needBroadphaseCollision(proxy0,proxy1); + return m_overlapFilterCallback->needBroadphaseCollision(proxy0, proxy1); bool collides = (proxy0->m_collisionFilterGroup & proxy1->m_collisionFilterMask) != 0; collides = collides && (proxy1->m_collisionFilterGroup & proxy0->m_collisionFilterMask); - + return collides; } // Add a pair and return the new pair. If the pair already exists, // no new pair is created and the old one is returned. 
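Aside for readers of this hunk: needsBroadphaseCollision above is the standard symmetric group/mask filter, optionally overridden by a user-supplied btOverlapFilterCallback. A minimal standalone sketch of that test follows; the ProxyStub type and the example group values are illustrative, not Bullet's btBroadphaseProxy fields or constants.

#include <cassert>

// Illustrative stand-in for the two filter fields on btBroadphaseProxy.
struct ProxyStub
{
	int group;  // which group(s) this object belongs to
	int mask;   // which groups this object is willing to collide with
};

// Symmetric test, mirroring the two '&' lines in needsBroadphaseCollision above:
// A must accept B's group AND B must accept A's group.
static bool needsBroadphaseCollision(const ProxyStub& a, const ProxyStub& b)
{
	bool collides = (a.group & b.mask) != 0;
	collides = collides && ((b.group & a.mask) != 0);
	return collides;
}

int main()
{
	ProxyStub debris = {1 << 3, 1 << 0};  // collides only with group 0
	ProxyStub ground = {1 << 0, ~0};      // collides with everything
	ProxyStub sensor = {1 << 4, 1 << 1};  // only interested in group 1

	assert(needsBroadphaseCollision(debris, ground));   // accepted in both directions
	assert(!needsBroadphaseCollision(debris, sensor));  // neither side accepts the other
	return 0;
}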
- virtual btBroadphasePair* addOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) + virtual btBroadphasePair* addOverlappingPair(btBroadphaseProxy * proxy0, btBroadphaseProxy * proxy1) { - if (!needsBroadphaseCollision(proxy0,proxy1)) + if (!needsBroadphaseCollision(proxy0, proxy1)) return 0; - return internalAddPair(proxy0,proxy1); + return internalAddPair(proxy0, proxy1); } - - - void cleanProxyFromPairs(btBroadphaseProxy* proxy,btDispatcher* dispatcher); + void cleanProxyFromPairs(btBroadphaseProxy * proxy, btDispatcher * dispatcher); - - virtual void processAllOverlappingPairs(btOverlapCallback*,btDispatcher* dispatcher); + virtual void processAllOverlappingPairs(btOverlapCallback*, btDispatcher * dispatcher); - virtual void processAllOverlappingPairs(btOverlapCallback* callback,btDispatcher* dispatcher, const struct btDispatcherInfo& dispatchInfo); + virtual void processAllOverlappingPairs(btOverlapCallback * callback, btDispatcher * dispatcher, const struct btDispatcherInfo& dispatchInfo); - virtual btBroadphasePair* getOverlappingPairArrayPtr() + virtual btBroadphasePair* getOverlappingPairArrayPtr() { return &m_overlappingPairArray[0]; } - const btBroadphasePair* getOverlappingPairArrayPtr() const + const btBroadphasePair* getOverlappingPairArrayPtr() const { return &m_overlappingPairArray[0]; } - btBroadphasePairArray& getOverlappingPairArray() + btBroadphasePairArray& getOverlappingPairArray() { return m_overlappingPairArray; } - const btBroadphasePairArray& getOverlappingPairArray() const + const btBroadphasePairArray& getOverlappingPairArray() const { return m_overlappingPairArray; } - void cleanOverlappingPair(btBroadphasePair& pair,btDispatcher* dispatcher); - + void cleanOverlappingPair(btBroadphasePair & pair, btDispatcher * dispatcher); - - btBroadphasePair* findPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1); + btBroadphasePair* findPair(btBroadphaseProxy * proxy0, btBroadphaseProxy * proxy1); int GetCount() const { return m_overlappingPairArray.size(); } -// btBroadphasePair* GetPairs() { return m_pairs; } + // btBroadphasePair* GetPairs() { return m_pairs; } btOverlapFilterCallback* getOverlapFilterCallback() { return m_overlapFilterCallback; } - void setOverlapFilterCallback(btOverlapFilterCallback* callback) + void setOverlapFilterCallback(btOverlapFilterCallback * callback) { m_overlapFilterCallback = callback; } - int getNumOverlappingPairs() const + int getNumOverlappingPairs() const { return m_overlappingPairArray.size(); } + private: - - btBroadphasePair* internalAddPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1); + btBroadphasePair* internalAddPair(btBroadphaseProxy * proxy0, btBroadphaseProxy * proxy1); - void growTables(); + void growTables(); SIMD_FORCE_INLINE bool equalsPair(const btBroadphasePair& pair, int proxyId1, int proxyId2) - { + { return pair.m_pProxy0->getUid() == proxyId1 && pair.m_pProxy1->getUid() == proxyId2; } @@ -214,40 +199,37 @@ private: } */ - SIMD_FORCE_INLINE unsigned int getHash(unsigned int proxyId1, unsigned int proxyId2) { unsigned int key = proxyId1 | (proxyId2 << 16); // Thomas Wang's hash key += ~(key << 15); - key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); + key ^= (key >> 10); + key += (key << 3); + key ^= (key >> 6); key += ~(key << 11); - key ^= (key >> 16); + key ^= (key >> 16); return key; } - - - SIMD_FORCE_INLINE btBroadphasePair* internalFindPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1, int hash) + SIMD_FORCE_INLINE btBroadphasePair* 
internalFindPair(btBroadphaseProxy * proxy0, btBroadphaseProxy * proxy1, int hash) { int proxyId1 = proxy0->getUid(); int proxyId2 = proxy1->getUid(); - #if 0 // wrong, 'equalsPair' use unsorted uids, copy-past devil striked again. Nat. +#if 0 // wrong, 'equalsPair' use unsorted uids, copy-past devil striked again. Nat. if (proxyId1 > proxyId2) btSwap(proxyId1, proxyId2); - #endif +#endif int index = m_hashTable[hash]; - - while( index != BT_NULL_PAIR && equalsPair(m_overlappingPairArray[index], proxyId1, proxyId2) == false) + + while (index != BT_NULL_PAIR && equalsPair(m_overlappingPairArray[index], proxyId1, proxyId2) == false) { index = m_next[index]; } - if ( index == BT_NULL_PAIR ) + if (index == BT_NULL_PAIR) { return NULL; } @@ -257,155 +239,136 @@ private: return &m_overlappingPairArray[index]; } - virtual bool hasDeferredRemoval() + virtual bool hasDeferredRemoval() { return false; } - virtual void setInternalGhostPairCallback(btOverlappingPairCallback* ghostPairCallback) + virtual void setInternalGhostPairCallback(btOverlappingPairCallback * ghostPairCallback) { m_ghostPairCallback = ghostPairCallback; } - virtual void sortOverlappingPairs(btDispatcher* dispatcher); - - - + virtual void sortOverlappingPairs(btDispatcher * dispatcher); }; - - - ///btSortedOverlappingPairCache maintains the objects with overlapping AABB ///Typically managed by the Broadphase, Axis3Sweep or btSimpleBroadphase -class btSortedOverlappingPairCache : public btOverlappingPairCache +class btSortedOverlappingPairCache : public btOverlappingPairCache { - protected: - //avoid brute-force finding all the time - btBroadphasePairArray m_overlappingPairArray; +protected: + //avoid brute-force finding all the time + btBroadphasePairArray m_overlappingPairArray; - //during the dispatch, check that user doesn't destroy/create proxy - bool m_blockedForChanges; + //during the dispatch, check that user doesn't destroy/create proxy + bool m_blockedForChanges; - ///by default, do the removal during the pair traversal - bool m_hasDeferredRemoval; - - //if set, use the callback instead of the built in filter in needBroadphaseCollision - btOverlapFilterCallback* m_overlapFilterCallback; + ///by default, do the removal during the pair traversal + bool m_hasDeferredRemoval; - btOverlappingPairCallback* m_ghostPairCallback; + //if set, use the callback instead of the built in filter in needBroadphaseCollision + btOverlapFilterCallback* m_overlapFilterCallback; - public: - - btSortedOverlappingPairCache(); - virtual ~btSortedOverlappingPairCache(); + btOverlappingPairCallback* m_ghostPairCallback; - virtual void processAllOverlappingPairs(btOverlapCallback*,btDispatcher* dispatcher); +public: + btSortedOverlappingPairCache(); + virtual ~btSortedOverlappingPairCache(); - void* removeOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1,btDispatcher* dispatcher); + virtual void processAllOverlappingPairs(btOverlapCallback*, btDispatcher* dispatcher); - void cleanOverlappingPair(btBroadphasePair& pair,btDispatcher* dispatcher); - - btBroadphasePair* addOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1); + void* removeOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1, btDispatcher* dispatcher); - btBroadphasePair* findPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1); - - - void cleanProxyFromPairs(btBroadphaseProxy* proxy,btDispatcher* dispatcher); + void cleanOverlappingPair(btBroadphasePair& pair, btDispatcher* dispatcher); - void 
removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher); + btBroadphasePair* addOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1); + btBroadphasePair* findPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1); - inline bool needsBroadphaseCollision(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) const - { - if (m_overlapFilterCallback) - return m_overlapFilterCallback->needBroadphaseCollision(proxy0,proxy1); + void cleanProxyFromPairs(btBroadphaseProxy* proxy, btDispatcher* dispatcher); - bool collides = (proxy0->m_collisionFilterGroup & proxy1->m_collisionFilterMask) != 0; - collides = collides && (proxy1->m_collisionFilterGroup & proxy0->m_collisionFilterMask); - - return collides; - } - - btBroadphasePairArray& getOverlappingPairArray() - { - return m_overlappingPairArray; - } + void removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher); - const btBroadphasePairArray& getOverlappingPairArray() const - { - return m_overlappingPairArray; - } + inline bool needsBroadphaseCollision(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) const + { + if (m_overlapFilterCallback) + return m_overlapFilterCallback->needBroadphaseCollision(proxy0, proxy1); - + bool collides = (proxy0->m_collisionFilterGroup & proxy1->m_collisionFilterMask) != 0; + collides = collides && (proxy1->m_collisionFilterGroup & proxy0->m_collisionFilterMask); + return collides; + } - btBroadphasePair* getOverlappingPairArrayPtr() - { - return &m_overlappingPairArray[0]; - } + btBroadphasePairArray& getOverlappingPairArray() + { + return m_overlappingPairArray; + } - const btBroadphasePair* getOverlappingPairArrayPtr() const - { - return &m_overlappingPairArray[0]; - } + const btBroadphasePairArray& getOverlappingPairArray() const + { + return m_overlappingPairArray; + } - int getNumOverlappingPairs() const - { - return m_overlappingPairArray.size(); - } - - btOverlapFilterCallback* getOverlapFilterCallback() - { - return m_overlapFilterCallback; - } + btBroadphasePair* getOverlappingPairArrayPtr() + { + return &m_overlappingPairArray[0]; + } - void setOverlapFilterCallback(btOverlapFilterCallback* callback) - { - m_overlapFilterCallback = callback; - } + const btBroadphasePair* getOverlappingPairArrayPtr() const + { + return &m_overlappingPairArray[0]; + } - virtual bool hasDeferredRemoval() - { - return m_hasDeferredRemoval; - } + int getNumOverlappingPairs() const + { + return m_overlappingPairArray.size(); + } - virtual void setInternalGhostPairCallback(btOverlappingPairCallback* ghostPairCallback) - { - m_ghostPairCallback = ghostPairCallback; - } + btOverlapFilterCallback* getOverlapFilterCallback() + { + return m_overlapFilterCallback; + } - virtual void sortOverlappingPairs(btDispatcher* dispatcher); - + void setOverlapFilterCallback(btOverlapFilterCallback* callback) + { + m_overlapFilterCallback = callback; + } -}; + virtual bool hasDeferredRemoval() + { + return m_hasDeferredRemoval; + } + virtual void setInternalGhostPairCallback(btOverlappingPairCallback* ghostPairCallback) + { + m_ghostPairCallback = ghostPairCallback; + } + virtual void sortOverlappingPairs(btDispatcher* dispatcher); +}; ///btNullPairCache skips add/removal of overlapping pairs. Userful for benchmarking and unit testing. 
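For orientation, the hashed cache earlier in this file keeps every pair in one flat array and resolves hash collisions with two parallel int arrays, m_hashTable (bucket heads) and m_next (chain links), as growTables and internalFindPair show. The sketch below reproduces that scheme in isolation: integer ids instead of proxy pointers, a fixed power-of-two capacity, and no growth; TinyPairSet and hashPair are illustrative names.

#include <cstdio>
#include <utility>
#include <vector>

static unsigned int hashPair(unsigned int idA, unsigned int idB)
{
	unsigned int key = idA | (idB << 16);  // same packing as getHash in this file
	// Thomas Wang's 32-bit mix, as used by the cache
	key += ~(key << 15);
	key ^= (key >> 10);
	key += (key << 3);
	key ^= (key >> 6);
	key += ~(key << 11);
	key ^= (key >> 16);
	return key;
}

struct TinyPairSet
{
	enum { NULL_PAIR = -1 };
	int capacity;  // power of two, so (capacity - 1) acts as the bucket mask
	std::vector<std::pair<int, int> > pairs;
	std::vector<int> hashTable;  // bucket head -> index into 'pairs'
	std::vector<int> next;       // chain links, parallel to 'pairs' (holds at most 'capacity' pairs)

	explicit TinyPairSet(int cap) : capacity(cap), hashTable(cap, NULL_PAIR), next(cap, NULL_PAIR) {}

	int find(int a, int b) const
	{
		if (a > b) std::swap(a, b);  // canonical order, like the m_uniqueId swap above
		int h = (int)(hashPair((unsigned)a, (unsigned)b) & (unsigned)(capacity - 1));
		for (int i = hashTable[h]; i != NULL_PAIR; i = next[i])
			if (pairs[i].first == a && pairs[i].second == b)
				return i;
		return NULL_PAIR;
	}

	void add(int a, int b)
	{
		if (a > b) std::swap(a, b);
		if (find(a, b) != NULL_PAIR) return;  // already stored
		int h = (int)(hashPair((unsigned)a, (unsigned)b) & (unsigned)(capacity - 1));
		int idx = (int)pairs.size();
		pairs.push_back(std::make_pair(a, b));
		next[idx] = hashTable[h];  // push the new pair onto the bucket's chain
		hashTable[h] = idx;
	}
};

int main()
{
	TinyPairSet set(16);
	set.add(3, 7);
	set.add(7, 3);  // same pair after the swap, so it is ignored
	std::printf("stored %d pair(s), find(7,3) -> slot %d\n", (int)set.pairs.size(), set.find(7, 3));
	return 0;
}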
class btNullPairCache : public btOverlappingPairCache { - - btBroadphasePairArray m_overlappingPairArray; + btBroadphasePairArray m_overlappingPairArray; public: - - virtual btBroadphasePair* getOverlappingPairArrayPtr() + virtual btBroadphasePair* getOverlappingPairArrayPtr() { return &m_overlappingPairArray[0]; } - const btBroadphasePair* getOverlappingPairArrayPtr() const + const btBroadphasePair* getOverlappingPairArrayPtr() const { return &m_overlappingPairArray[0]; } - btBroadphasePairArray& getOverlappingPairArray() + btBroadphasePairArray& getOverlappingPairArray() { return m_overlappingPairArray; } - - virtual void cleanOverlappingPair(btBroadphasePair& /*pair*/,btDispatcher* /*dispatcher*/) - { + virtual void cleanOverlappingPair(btBroadphasePair& /*pair*/, btDispatcher* /*dispatcher*/) + { } virtual int getNumOverlappingPairs() const @@ -413,16 +376,15 @@ public: return 0; } - virtual void cleanProxyFromPairs(btBroadphaseProxy* /*proxy*/,btDispatcher* /*dispatcher*/) + virtual void cleanProxyFromPairs(btBroadphaseProxy* /*proxy*/, btDispatcher* /*dispatcher*/) { - } - virtual void setOverlapFilterCallback(btOverlapFilterCallback* /*callback*/) + virtual void setOverlapFilterCallback(btOverlapFilterCallback* /*callback*/) { } - virtual void processAllOverlappingPairs(btOverlapCallback*,btDispatcher* /*dispatcher*/) + virtual void processAllOverlappingPairs(btOverlapCallback*, btDispatcher* /*dispatcher*/) { } @@ -431,39 +393,33 @@ public: return 0; } - virtual bool hasDeferredRemoval() + virtual bool hasDeferredRemoval() { return true; } - virtual void setInternalGhostPairCallback(btOverlappingPairCallback* /* ghostPairCallback */) + virtual void setInternalGhostPairCallback(btOverlappingPairCallback* /* ghostPairCallback */) { - } - virtual btBroadphasePair* addOverlappingPair(btBroadphaseProxy* /*proxy0*/,btBroadphaseProxy* /*proxy1*/) + virtual btBroadphasePair* addOverlappingPair(btBroadphaseProxy* /*proxy0*/, btBroadphaseProxy* /*proxy1*/) { return 0; } - virtual void* removeOverlappingPair(btBroadphaseProxy* /*proxy0*/,btBroadphaseProxy* /*proxy1*/,btDispatcher* /*dispatcher*/) + virtual void* removeOverlappingPair(btBroadphaseProxy* /*proxy0*/, btBroadphaseProxy* /*proxy1*/, btDispatcher* /*dispatcher*/) { return 0; } - virtual void removeOverlappingPairsContainingProxy(btBroadphaseProxy* /*proxy0*/,btDispatcher* /*dispatcher*/) + virtual void removeOverlappingPairsContainingProxy(btBroadphaseProxy* /*proxy0*/, btDispatcher* /*dispatcher*/) { } - - virtual void sortOverlappingPairs(btDispatcher* dispatcher) + + virtual void sortOverlappingPairs(btDispatcher* dispatcher) { - (void) dispatcher; + (void)dispatcher; } - - }; - -#endif //BT_OVERLAPPING_PAIR_CACHE_H - - +#endif //BT_OVERLAPPING_PAIR_CACHE_H diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCallback.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCallback.h index 3e069fa5e2..d16c72542f 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCallback.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btOverlappingPairCallback.h @@ -18,26 +18,24 @@ subject to the following restrictions: #define OVERLAPPING_PAIR_CALLBACK_H class btDispatcher; -struct btBroadphasePair; +struct btBroadphasePair; ///The btOverlappingPairCallback class is an additional optional broadphase user callback for adding/removing overlapping pairs, similar interface to btOverlappingPairCache. 
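The btOverlappingPairCallback interface declared just below exposes only the three pure virtual hooks visible in this hunk, so a passive observer takes a few lines. Here is a sketch of one that merely counts pair additions and removals; it would be registered through setInternalGhostPairCallback (shown earlier in btOverlappingPairCache.h). The class name and counters are illustrative, and the include paths assume Bullet's usual source layout.

#include "BulletCollision/BroadphaseCollision/btBroadphaseProxy.h"
#include "BulletCollision/BroadphaseCollision/btOverlappingPairCallback.h"

// Passive observer: counts broadphase pair creation/destruction, keeps no pairs itself.
class PairCounterCallback : public btOverlappingPairCallback
{
public:
	int m_added;
	int m_removed;

	PairCounterCallback() : m_added(0), m_removed(0) {}

	virtual btBroadphasePair* addOverlappingPair(btBroadphaseProxy*, btBroadphaseProxy*)
	{
		++m_added;
		return 0;  // the owning pair cache still creates the real btBroadphasePair
	}

	virtual void* removeOverlappingPair(btBroadphaseProxy*, btBroadphaseProxy*, btDispatcher*)
	{
		++m_removed;
		return 0;
	}

	virtual void removeOverlappingPairsContainingProxy(btBroadphaseProxy*, btDispatcher*)
	{
		// nothing to release in this sketch
	}
};

// Usage sketch: pairCache->setInternalGhostPairCallback(&counter);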
class btOverlappingPairCallback { protected: - btOverlappingPairCallback() {} - + btOverlappingPairCallback() {} + public: virtual ~btOverlappingPairCallback() { - } - - virtual btBroadphasePair* addOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) = 0; - virtual void* removeOverlappingPair(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1,btDispatcher* dispatcher) = 0; + virtual btBroadphasePair* addOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) = 0; - virtual void removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy0,btDispatcher* dispatcher) = 0; + virtual void* removeOverlappingPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1, btDispatcher* dispatcher) = 0; + virtual void removeOverlappingPairsContainingProxy(btBroadphaseProxy* proxy0, btDispatcher* dispatcher) = 0; }; -#endif //OVERLAPPING_PAIR_CALLBACK_H +#endif //OVERLAPPING_PAIR_CALLBACK_H diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp index 875d89c53e..b814fd84d8 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp @@ -21,43 +21,38 @@ subject to the following restrictions: #define RAYAABB2 -btQuantizedBvh::btQuantizedBvh() : - m_bulletVersion(BT_BULLET_VERSION), - m_useQuantization(false), - //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY) - m_traversalMode(TRAVERSAL_STACKLESS) - //m_traversalMode(TRAVERSAL_RECURSIVE) - ,m_subtreeHeaderCount(0) //PCK: add this line +btQuantizedBvh::btQuantizedBvh() : m_bulletVersion(BT_BULLET_VERSION), + m_useQuantization(false), + //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY) + m_traversalMode(TRAVERSAL_STACKLESS) + //m_traversalMode(TRAVERSAL_RECURSIVE) + , + m_subtreeHeaderCount(0) //PCK: add this line { - m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY); - m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY); + m_bvhAabbMin.setValue(-SIMD_INFINITY, -SIMD_INFINITY, -SIMD_INFINITY); + m_bvhAabbMax.setValue(SIMD_INFINITY, SIMD_INFINITY, SIMD_INFINITY); } - - - - void btQuantizedBvh::buildInternal() { ///assumes that caller filled in the m_quantizedLeafNodes m_useQuantization = true; int numLeafNodes = 0; - + if (m_useQuantization) { //now we have an array of leafnodes in m_leafNodes numLeafNodes = m_quantizedLeafNodes.size(); - m_quantizedContiguousNodes.resize(2*numLeafNodes); - + m_quantizedContiguousNodes.resize(2 * numLeafNodes); } m_curNodeIndex = 0; - buildTree(0,numLeafNodes); + buildTree(0, numLeafNodes); ///if the entire tree is small then subtree size, we need to create a header info for the tree - if(m_useQuantization && !m_SubtreeHeaders.size()) + if (m_useQuantization && !m_SubtreeHeaders.size()) { btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand(); subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]); @@ -73,29 +68,24 @@ void btQuantizedBvh::buildInternal() m_leafNodes.clear(); } - - ///just for debugging, to visualize the individual patches/subtrees #ifdef DEBUG_PATCH_COLORS -btVector3 color[4]= -{ - btVector3(1,0,0), - btVector3(0,1,0), - btVector3(0,0,1), - btVector3(0,1,1) -}; -#endif //DEBUG_PATCH_COLORS - - +btVector3 color[4] = + { + btVector3(1, 0, 0), + btVector3(0, 1, 0), + btVector3(0, 0, 1), + btVector3(0, 1, 1)}; +#endif //DEBUG_PATCH_COLORS -void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& 
bvhAabbMax,btScalar quantizationMargin) +void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin, const btVector3& bvhAabbMax, btScalar quantizationMargin) { //enlarge the AABB to avoid division by zero when initializing the quantization values - btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin); + btVector3 clampValue(quantizationMargin, quantizationMargin, quantizationMargin); m_bvhAabbMin = bvhAabbMin - clampValue; m_bvhAabbMax = bvhAabbMax + clampValue; btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin; - m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize; + m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize; m_useQuantization = true; @@ -103,25 +93,22 @@ void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btV unsigned short vecIn[3]; btVector3 v; { - quantize(vecIn,m_bvhAabbMin,false); + quantize(vecIn, m_bvhAabbMin, false); v = unQuantize(vecIn); - m_bvhAabbMin.setMin(v-clampValue); + m_bvhAabbMin.setMin(v - clampValue); } - aabbSize = m_bvhAabbMax - m_bvhAabbMin; - m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize; + aabbSize = m_bvhAabbMax - m_bvhAabbMin; + m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize; { - quantize(vecIn,m_bvhAabbMax,true); + quantize(vecIn, m_bvhAabbMax, true); v = unQuantize(vecIn); - m_bvhAabbMax.setMax(v+clampValue); + m_bvhAabbMax.setMax(v + clampValue); } aabbSize = m_bvhAabbMax - m_bvhAabbMin; - m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize; + m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize; } } - - - btQuantizedBvh::~btQuantizedBvh() { } @@ -129,104 +116,100 @@ btQuantizedBvh::~btQuantizedBvh() #ifdef DEBUG_TREE_BUILDING int gStackDepth = 0; int gMaxStackDepth = 0; -#endif //DEBUG_TREE_BUILDING +#endif //DEBUG_TREE_BUILDING -void btQuantizedBvh::buildTree (int startIndex,int endIndex) +void btQuantizedBvh::buildTree(int startIndex, int endIndex) { #ifdef DEBUG_TREE_BUILDING gStackDepth++; if (gStackDepth > gMaxStackDepth) gMaxStackDepth = gStackDepth; -#endif //DEBUG_TREE_BUILDING - +#endif //DEBUG_TREE_BUILDING int splitAxis, splitIndex, i; - int numIndices =endIndex-startIndex; + int numIndices = endIndex - startIndex; int curIndex = m_curNodeIndex; - btAssert(numIndices>0); + btAssert(numIndices > 0); - if (numIndices==1) + if (numIndices == 1) { #ifdef DEBUG_TREE_BUILDING gStackDepth--; -#endif //DEBUG_TREE_BUILDING - - assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex); +#endif //DEBUG_TREE_BUILDING + + assignInternalNodeFromLeafNode(m_curNodeIndex, startIndex); m_curNodeIndex++; - return; + return; } //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'. - - splitAxis = calcSplittingAxis(startIndex,endIndex); - splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis); + splitAxis = calcSplittingAxis(startIndex, endIndex); + + splitIndex = sortAndCalcSplittingIndex(startIndex, endIndex, splitAxis); int internalNodeIndex = m_curNodeIndex; - + //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value. 
//the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values - setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization - setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization - - - for (i=startIndex;i<endIndex;i++) + setInternalNodeAabbMin(m_curNodeIndex, m_bvhAabbMax); //can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization + setInternalNodeAabbMax(m_curNodeIndex, m_bvhAabbMin); //can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization + + for (i = startIndex; i < endIndex; i++) { - mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i)); + mergeInternalNodeAabb(m_curNodeIndex, getAabbMin(i), getAabbMax(i)); } m_curNodeIndex++; - //internalNode->m_escapeIndex; - + int leftChildNodexIndex = m_curNodeIndex; //build left child tree - buildTree(startIndex,splitIndex); + buildTree(startIndex, splitIndex); int rightChildNodexIndex = m_curNodeIndex; //build right child tree - buildTree(splitIndex,endIndex); + buildTree(splitIndex, endIndex); #ifdef DEBUG_TREE_BUILDING gStackDepth--; -#endif //DEBUG_TREE_BUILDING +#endif //DEBUG_TREE_BUILDING int escapeIndex = m_curNodeIndex - curIndex; if (m_useQuantization) { //escapeIndex is the number of nodes of this subtree - const int sizeQuantizedNode =sizeof(btQuantizedBvhNode); + const int sizeQuantizedNode = sizeof(btQuantizedBvhNode); const int treeSizeInBytes = escapeIndex * sizeQuantizedNode; if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES) { - updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex); + updateSubtreeHeaders(leftChildNodexIndex, rightChildNodexIndex); } - } else + } + else { - } - setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex); - + setInternalNodeEscapeIndex(internalNodeIndex, escapeIndex); } -void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex) +void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex) { btAssert(m_useQuantization); btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex]; int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex(); - int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); - + int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); + btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex]; int rightSubTreeSize = rightChildNode.isLeafNode() ? 
1 : rightChildNode.getEscapeIndex(); - int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); + int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); - if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) + if (leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) { btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand(); subtree.setAabbFromQuantizeNode(leftChildNode); @@ -234,7 +217,7 @@ void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChild subtree.m_subtreeSize = leftSubTreeSize; } - if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) + if (rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) { btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand(); subtree.setAabbFromQuantizeNode(rightChildNode); @@ -246,32 +229,31 @@ void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChild m_subtreeHeaderCount = m_SubtreeHeaders.size(); } - -int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis) +int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis) { int i; - int splitIndex =startIndex; + int splitIndex = startIndex; int numIndices = endIndex - startIndex; btScalar splitValue; - btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.)); - for (i=startIndex;i<endIndex;i++) + btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.)); + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); - means+=center; + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); + means += center; } - means *= (btScalar(1.)/(btScalar)numIndices); - + means *= (btScalar(1.) / (btScalar)numIndices); + splitValue = means[splitAxis]; - + //sort leafNodes so all values larger then splitValue comes first, and smaller values start from 'splitIndex'. 
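The comment just above describes the in-place partition that sortAndCalcSplittingIndex performs: compute the mean leaf center along the chosen axis, swap larger-than-mean leaves to the front, and fall back to the middle if the split is badly lopsided (the rangeBalancedIndices guard further down). A standalone sketch of the same idea on a plain array of one-axis centers, with illustrative names:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Partition centers[start, end) so values greater than their mean come first.
// Returns the split index; degenerate splits fall back to the middle, mirroring
// the rangeBalancedIndices check in sortAndCalcSplittingIndex.
static int splitByMean(std::vector<float>& centers, int start, int end)
{
	int n = end - start;
	float mean = 0.0f;
	for (int i = start; i < end; i++) mean += centers[i];
	mean /= (float)n;

	int splitIndex = start;
	for (int i = start; i < end; i++)
		if (centers[i] > mean)
			std::swap(centers[i], centers[splitIndex++]);  // move this leaf into the front range

	// Guard against lopsided splits that would make the recursion very deep.
	int balanced = n / 3;
	if (splitIndex <= start + balanced || splitIndex >= end - 1 - balanced)
		splitIndex = start + (n >> 1);
	return splitIndex;
}

int main()
{
	float vals[] = {0.1f, 5.0f, 4.5f, 0.2f, 5.5f, 0.3f, 4.8f, 0.4f, 0.6f};
	std::vector<float> c(vals, vals + 9);
	int s = splitByMean(c, 0, (int)c.size());
	std::printf("split index = %d\n", s);  // larger-than-mean centers now occupy [0, s)
	return 0;
}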
- for (i=startIndex;i<endIndex;i++) + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); if (center[splitAxis] > splitValue) { //swap - swapLeafNodes(i,splitIndex); + swapLeafNodes(i, splitIndex); splitIndex++; } } @@ -281,56 +263,53 @@ int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int sp //unbalanced1 is unsafe: it can cause stack overflows //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1))); - //unbalanced2 should work too: always use center (perfect balanced trees) + //unbalanced2 should work too: always use center (perfect balanced trees) //bool unbalanced2 = true; //this should be safe too: - int rangeBalancedIndices = numIndices/3; - bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices))); - + int rangeBalancedIndices = numIndices / 3; + bool unbalanced = ((splitIndex <= (startIndex + rangeBalancedIndices)) || (splitIndex >= (endIndex - 1 - rangeBalancedIndices))); + if (unbalanced) { - splitIndex = startIndex+ (numIndices>>1); + splitIndex = startIndex + (numIndices >> 1); } - bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex)); + bool unbal = (splitIndex == startIndex) || (splitIndex == (endIndex)); (void)unbal; btAssert(!unbal); return splitIndex; } - -int btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex) +int btQuantizedBvh::calcSplittingAxis(int startIndex, int endIndex) { int i; - btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.)); - btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.)); - int numIndices = endIndex-startIndex; + btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.)); + btVector3 variance(btScalar(0.), btScalar(0.), btScalar(0.)); + int numIndices = endIndex - startIndex; - for (i=startIndex;i<endIndex;i++) + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); - means+=center; + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); + means += center; } - means *= (btScalar(1.)/(btScalar)numIndices); - - for (i=startIndex;i<endIndex;i++) + means *= (btScalar(1.) / (btScalar)numIndices); + + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); - btVector3 diff2 = center-means; + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); + btVector3 diff2 = center - means; diff2 = diff2 * diff2; variance += diff2; } - variance *= (btScalar(1.)/ ((btScalar)numIndices-1) ); - + variance *= (btScalar(1.) 
/ ((btScalar)numIndices - 1)); + return variance.maxAxis(); } - - -void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const +void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const { //either choose recursive traversal (walkTree) or stackless (walkStacklessTree) @@ -339,38 +318,37 @@ void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallb ///quantize query AABB unsigned short int quantizedQueryAabbMin[3]; unsigned short int quantizedQueryAabbMax[3]; - quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0); - quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1); + quantizeWithClamp(quantizedQueryAabbMin, aabbMin, 0); + quantizeWithClamp(quantizedQueryAabbMax, aabbMax, 1); switch (m_traversalMode) { - case TRAVERSAL_STACKLESS: - walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex); - break; - case TRAVERSAL_STACKLESS_CACHE_FRIENDLY: - walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); - break; - case TRAVERSAL_RECURSIVE: + case TRAVERSAL_STACKLESS: + walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, 0, m_curNodeIndex); + break; + case TRAVERSAL_STACKLESS_CACHE_FRIENDLY: + walkStacklessQuantizedTreeCacheFriendly(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); + break; + case TRAVERSAL_RECURSIVE: { const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0]; - walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); + walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); } break; - default: - //unsupported - btAssert(0); + default: + //unsupported + btAssert(0); } - } else + } + else { - walkStacklessTree(nodeCallback,aabbMin,aabbMax); + walkStacklessTree(nodeCallback, aabbMin, aabbMax); } } - int maxIterations = 0; - -void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const +void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const { btAssert(!m_useQuantization); @@ -384,24 +362,25 @@ void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const while (curIndex < m_curNodeIndex) { //catch bugs in tree data - btAssert (walkIterations < m_curNodeIndex); + btAssert(walkIterations < m_curNodeIndex); walkIterations++; - aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg); + aabbOverlap = TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg); isLeafNode = rootNode->m_escapeIndex == -1; - + //PCK: unsigned instead of bool if (isLeafNode && (aabbOverlap != 0)) { - nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex); - } - + nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex); + } + //PCK: unsigned instead of bool if ((aabbOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->m_escapeIndex; rootNode += escapeIndex; @@ -410,7 +389,6 @@ void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const } if (maxIterations < walkIterations) maxIterations = walkIterations; - } /* @@ -434,39 +412,38 @@ void 
btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback } */ -void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const +void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode, btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const { btAssert(m_useQuantization); - + bool isLeafNode; //PCK: unsigned instead of bool unsigned aabbOverlap; //PCK: unsigned instead of bool - aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax); + aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, currentNode->m_quantizedAabbMin, currentNode->m_quantizedAabbMax); isLeafNode = currentNode->isLeafNode(); - + //PCK: unsigned instead of bool if (aabbOverlap != 0) { if (isLeafNode) { - nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex()); - } else + nodeCallback->processNode(currentNode->getPartId(), currentNode->getTriangleIndex()); + } + else { //process left and right children - const btQuantizedBvhNode* leftChildNode = currentNode+1; - walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); + const btQuantizedBvhNode* leftChildNode = currentNode + 1; + walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); - const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1:leftChildNode+leftChildNode->getEscapeIndex(); - walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); + const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? 
leftChildNode + 1 : leftChildNode + leftChildNode->getEscapeIndex(); + walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); } - } + } } - - -void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const +void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const { btAssert(!m_useQuantization); @@ -475,11 +452,11 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall int walkIterations = 0; bool isLeafNode; //PCK: unsigned instead of bool - unsigned aabbOverlap=0; - unsigned rayBoxOverlap=0; + unsigned aabbOverlap = 0; + unsigned rayBoxOverlap = 0; btScalar lambda_max = 1.0; - - /* Quick pruning by quantized box */ + + /* Quick pruning by quantized box */ btVector3 rayAabbMin = raySource; btVector3 rayAabbMax = raySource; rayAabbMin.setMin(rayTarget); @@ -490,15 +467,15 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall rayAabbMax += aabbMax; #ifdef RAYAABB2 - btVector3 rayDir = (rayTarget-raySource); - rayDir.normalize (); - lambda_max = rayDir.dot(rayTarget-raySource); + btVector3 rayDir = (rayTarget - raySource); + rayDir.normalize(); + lambda_max = rayDir.dot(rayTarget - raySource); ///what about division by zero? --> just set rayDirection[i] to 1.0 btVector3 rayDirectionInverse; rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0]; rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1]; rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2]; - unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0}; + unsigned int sign[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0}; #endif btVector3 bounds[2]; @@ -507,7 +484,7 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall { btScalar param = 1.0; //catch bugs in tree data - btAssert (walkIterations < m_curNodeIndex); + btAssert(walkIterations < m_curNodeIndex); walkIterations++; @@ -517,34 +494,35 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall bounds[0] -= aabbMax; bounds[1] -= aabbMin; - aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg); + aabbOverlap = TestAabbAgainstAabb2(rayAabbMin, rayAabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg); //perhaps profile if it is worth doing the aabbOverlap test first #ifdef RAYAABB2 - ///careful with this check: need to check division by zero (above) and fix the unQuantize method - ///thanks Joerg/hiker for the reproduction case! - ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858 - rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false; + ///careful with this check: need to check division by zero (above) and fix the unQuantize method + ///thanks Joerg/hiker for the reproduction case! 
+ ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858 + rayBoxOverlap = aabbOverlap ? btRayAabb2(raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false; #else btVector3 normal; - rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal); + rayBoxOverlap = btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal); #endif isLeafNode = rootNode->m_escapeIndex == -1; - + //PCK: unsigned instead of bool if (isLeafNode && (rayBoxOverlap != 0)) { - nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex); - } - + nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex); + } + //PCK: unsigned instead of bool if ((rayBoxOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->m_escapeIndex; rootNode += escapeIndex; @@ -553,15 +531,12 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall } if (maxIterations < walkIterations) maxIterations = walkIterations; - } - - -void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const +void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const { btAssert(m_useQuantization); - + int curIndex = startNodeIndex; int walkIterations = 0; int subTreeSize = endNodeIndex - startNodeIndex; @@ -569,7 +544,7 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex]; int escapeIndex; - + bool isLeafNode; //PCK: unsigned instead of bool unsigned boxBoxOverlap = 0; @@ -578,14 +553,14 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* btScalar lambda_max = 1.0; #ifdef RAYAABB2 - btVector3 rayDirection = (rayTarget-raySource); - rayDirection.normalize (); - lambda_max = rayDirection.dot(rayTarget-raySource); + btVector3 rayDirection = (rayTarget - raySource); + rayDirection.normalize(); + lambda_max = rayDirection.dot(rayTarget - raySource); ///what about division by zero? --> just set rayDirection[i] to 1.0 rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0]; rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1]; rayDirection[2] = rayDirection[2] == btScalar(0.0) ? 
btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2]; - unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0}; + unsigned int sign[3] = {rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0}; #endif /* Quick pruning by quantized box */ @@ -600,37 +575,36 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* unsigned short int quantizedQueryAabbMin[3]; unsigned short int quantizedQueryAabbMax[3]; - quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0); - quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1); + quantizeWithClamp(quantizedQueryAabbMin, rayAabbMin, 0); + quantizeWithClamp(quantizedQueryAabbMax, rayAabbMax, 1); while (curIndex < endNodeIndex) { - //#define VISUALLY_ANALYZE_BVH 1 #ifdef VISUALLY_ANALYZE_BVH //some code snippet to debugDraw aabb, to visually analyze bvh structure static int drawPatch = 0; //need some global access to a debugDrawer extern btIDebugDraw* debugDrawerPtr; - if (curIndex==drawPatch) + if (curIndex == drawPatch) { - btVector3 aabbMin,aabbMax; + btVector3 aabbMin, aabbMax; aabbMin = unQuantize(rootNode->m_quantizedAabbMin); aabbMax = unQuantize(rootNode->m_quantizedAabbMax); - btVector3 color(1,0,0); - debugDrawerPtr->drawAabb(aabbMin,aabbMax,color); + btVector3 color(1, 0, 0); + debugDrawerPtr->drawAabb(aabbMin, aabbMax, color); } -#endif//VISUALLY_ANALYZE_BVH +#endif //VISUALLY_ANALYZE_BVH //catch bugs in tree data - btAssert (walkIterations < subTreeSize); + btAssert(walkIterations < subTreeSize); walkIterations++; //PCK: unsigned instead of bool // only interested if this is closer than any previous hit btScalar param = 1.0; rayBoxOverlap = 0; - boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax); + boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax); isLeafNode = rootNode->isLeafNode(); if (boxBoxOverlap) { @@ -655,24 +629,25 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858 //BT_PROFILE("btRayAabb2"); - rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max); - + rayBoxOverlap = btRayAabb2(raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max); + #else - rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal); + rayBoxOverlap = true; //btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal); #endif } - + if (isLeafNode && rayBoxOverlap) { - nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex()); + nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex()); } - + //PCK: unsigned instead of bool if ((rayBoxOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->getEscapeIndex(); rootNode += escapeIndex; @@ -681,13 +656,12 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* } if (maxIterations < walkIterations) maxIterations = walkIterations; - } -void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const +void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback, 
unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const { btAssert(m_useQuantization); - + int curIndex = startNodeIndex; int walkIterations = 0; int subTreeSize = endNodeIndex - startNodeIndex; @@ -695,49 +669,49 @@ void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallb const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex]; int escapeIndex; - + bool isLeafNode; //PCK: unsigned instead of bool unsigned aabbOverlap; while (curIndex < endNodeIndex) { - //#define VISUALLY_ANALYZE_BVH 1 #ifdef VISUALLY_ANALYZE_BVH //some code snippet to debugDraw aabb, to visually analyze bvh structure static int drawPatch = 0; //need some global access to a debugDrawer extern btIDebugDraw* debugDrawerPtr; - if (curIndex==drawPatch) + if (curIndex == drawPatch) { - btVector3 aabbMin,aabbMax; + btVector3 aabbMin, aabbMax; aabbMin = unQuantize(rootNode->m_quantizedAabbMin); aabbMax = unQuantize(rootNode->m_quantizedAabbMax); - btVector3 color(1,0,0); - debugDrawerPtr->drawAabb(aabbMin,aabbMax,color); + btVector3 color(1, 0, 0); + debugDrawerPtr->drawAabb(aabbMin, aabbMax, color); } -#endif//VISUALLY_ANALYZE_BVH +#endif //VISUALLY_ANALYZE_BVH //catch bugs in tree data - btAssert (walkIterations < subTreeSize); + btAssert(walkIterations < subTreeSize); walkIterations++; //PCK: unsigned instead of bool - aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax); + aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax); isLeafNode = rootNode->isLeafNode(); - + if (isLeafNode && aabbOverlap) { - nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex()); - } - + nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex()); + } + //PCK: unsigned instead of bool if ((aabbOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->getEscapeIndex(); rootNode += escapeIndex; @@ -746,40 +720,36 @@ void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallb } if (maxIterations < walkIterations) maxIterations = walkIterations; - } //This traversal can be called from Playstation 3 SPU -void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const +void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const { btAssert(m_useQuantization); int i; - - for (i=0;i<this->m_SubtreeHeaders.size();i++) + for (i = 0; i < this->m_SubtreeHeaders.size(); i++) { const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i]; //PCK: unsigned instead of bool - unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax); + unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, subtree.m_quantizedAabbMin, subtree.m_quantizedAabbMax); if (overlap != 0) { - walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax, - subtree.m_rootNodeIndex, - subtree.m_rootNodeIndex+subtree.m_subtreeSize); + 
walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, + subtree.m_rootNodeIndex, + subtree.m_rootNodeIndex + subtree.m_subtreeSize); } } } - -void btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const +void btQuantizedBvh::reportRayOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const { - reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0)); + reportBoxCastOverlappingNodex(nodeCallback, raySource, rayTarget, btVector3(0, 0, 0), btVector3(0, 0, 0)); } - -void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const +void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax) const { //always use stackless @@ -803,31 +773,31 @@ void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCa reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax); } */ - } - -void btQuantizedBvh::swapLeafNodes(int i,int splitIndex) +void btQuantizedBvh::swapLeafNodes(int i, int splitIndex) { if (m_useQuantization) { - btQuantizedBvhNode tmp = m_quantizedLeafNodes[i]; - m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex]; - m_quantizedLeafNodes[splitIndex] = tmp; - } else + btQuantizedBvhNode tmp = m_quantizedLeafNodes[i]; + m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex]; + m_quantizedLeafNodes[splitIndex] = tmp; + } + else { - btOptimizedBvhNode tmp = m_leafNodes[i]; - m_leafNodes[i] = m_leafNodes[splitIndex]; - m_leafNodes[splitIndex] = tmp; + btOptimizedBvhNode tmp = m_leafNodes[i]; + m_leafNodes[i] = m_leafNodes[splitIndex]; + m_leafNodes[splitIndex] = tmp; } } -void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex) +void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex) { if (m_useQuantization) { m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex]; - } else + } + else { m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex]; } @@ -844,11 +814,10 @@ static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1; static const unsigned BVH_ALIGNMENT_BLOCKS = 2; #endif - unsigned int btQuantizedBvh::getAlignmentSerializationPadding() { // I changed this to 0 since the extra padding is not needed or used. 
- return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT; + return 0; //BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT; } unsigned btQuantizedBvh::calculateSerializeBufferSize() const @@ -862,12 +831,12 @@ unsigned btQuantizedBvh::calculateSerializeBufferSize() const return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode); } -bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const +bool btQuantizedBvh::serialize(void* o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const { btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size()); m_subtreeHeaderCount = m_SubtreeHeaders.size(); -/* if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) + /* if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) { ///check alignedment for buffer? btAssert(0); @@ -875,7 +844,7 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe } */ - btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer; + btQuantizedBvh* targetBvh = (btQuantizedBvh*)o_alignedDataBuffer; // construct the class so the virtual function table, etc will be set up // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor @@ -885,10 +854,9 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe { targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex)); - - btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin); - btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax); - btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization); + btSwapVector3Endian(m_bvhAabbMin, targetBvh->m_bvhAabbMin); + btSwapVector3Endian(m_bvhAabbMax, targetBvh->m_bvhAabbMax); + btSwapVector3Endian(m_bvhQuantization, targetBvh->m_bvhQuantization); targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode); targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount)); @@ -905,12 +873,12 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe targetBvh->m_useQuantization = m_useQuantization; - unsigned char *nodeData = (unsigned char *)targetBvh; + unsigned char* nodeData = (unsigned char*)targetBvh; nodeData += sizeof(btQuantizedBvh); - - unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + + unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; - + int nodeCount = m_curNodeIndex; if (m_useQuantization) @@ -936,7 +904,6 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe { for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++) { - targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]; targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]; targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]; @@ -946,8 +913,6 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = 
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]; targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex; - - } } nodeData += sizeof(btQuantizedBvhNode) * nodeCount; @@ -993,7 +958,7 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0); } - sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; // Now serialize the subtree headers @@ -1048,14 +1013,13 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe return true; } -btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian) +btQuantizedBvh* btQuantizedBvh::deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian) { - - if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) + if (i_alignedDataBuffer == NULL) // || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) { return NULL; } - btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer; + btQuantizedBvh* bvh = (btQuantizedBvh*)i_alignedDataBuffer; if (i_swapEndian) { @@ -1077,12 +1041,12 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un return NULL; } - unsigned char *nodeData = (unsigned char *)bvh; + unsigned char* nodeData = (unsigned char*)bvh; nodeData += sizeof(btQuantizedBvh); - - unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + + unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; - + int nodeCount = bvh->m_curNodeIndex; // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor @@ -1120,7 +1084,7 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un { btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg); btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg); - + bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex)); bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart)); bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex)); @@ -1129,7 +1093,7 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un nodeData += sizeof(btOptimizedBvhNode) * nodeCount; } - sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; // Now serialize the subtree headers @@ -1155,13 +1119,11 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un } // Constructor that prevents btVector3's default constructor from being called -btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) : -m_bvhAabbMin(self.m_bvhAabbMin), -m_bvhAabbMax(self.m_bvhAabbMax), -m_bvhQuantization(self.m_bvhQuantization), 
-m_bulletVersion(BT_BULLET_VERSION) +btQuantizedBvh::btQuantizedBvh(btQuantizedBvh& self, bool /* ownsMemory */) : m_bvhAabbMin(self.m_bvhAabbMin), + m_bvhAabbMax(self.m_bvhAabbMax), + m_bvhQuantization(self.m_bvhQuantization), + m_bulletVersion(BT_BULLET_VERSION) { - } void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData) @@ -1171,8 +1133,8 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization); m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex; - m_useQuantization = quantizedBvhFloatData.m_useQuantization!=0; - + m_useQuantization = quantizedBvhFloatData.m_useQuantization != 0; + { int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes; m_contiguousNodes.resize(numElem); @@ -1181,7 +1143,7 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB { btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg); m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg); @@ -1195,11 +1157,11 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB { int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes; m_quantizedContiguousNodes.resize(numElem); - + if (numElem) { btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex; m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; @@ -1213,16 +1175,16 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB } m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode); - + { int numElem = quantizedBvhFloatData.m_numSubtreeHeaders; m_SubtreeHeaders.resize(numElem); if (numElem) { btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { - m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ; + m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1]; m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2]; m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0]; @@ -1242,8 +1204,8 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization); m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex; - m_useQuantization = quantizedBvhDoubleData.m_useQuantization!=0; - + m_useQuantization = quantizedBvhDoubleData.m_useQuantization != 0; + { int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes; m_contiguousNodes.resize(numElem); @@ -1252,7 +1214,7 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize { btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg); 
m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg); @@ -1266,11 +1228,11 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize { int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes; m_quantizedContiguousNodes.resize(numElem); - + if (numElem) { btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex; m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; @@ -1284,16 +1246,16 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize } m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode); - + { int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders; m_SubtreeHeaders.resize(numElem); if (numElem) { btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { - m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ; + m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1]; m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2]; m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0]; @@ -1304,32 +1266,29 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize } } } - } - - ///fills the dataBuffer and returns the struct name (and 0 on failure) -const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const +const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const { btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer; - + m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax); m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin); m_bvhQuantization.serialize(quantizedData->m_bvhQuantization); quantizedData->m_curNodeIndex = m_curNodeIndex; quantizedData->m_useQuantization = m_useQuantization; - + quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size(); - quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*) (m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0); + quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*)(m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0); if (quantizedData->m_contiguousNodesPtr) { int sz = sizeof(btOptimizedBvhNodeData); int numElem = m_contiguousNodes.size(); - btChunk* chunk = serializer->allocate(sz,numElem); + btChunk* chunk = serializer->allocate(sz, numElem); btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg); m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg); @@ -1339,19 +1298,19 @@ const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer // Fill padding with zeros to appease msan. 
memset(memPtr->m_pad, 0, sizeof(memPtr->m_pad)); } - serializer->finalizeChunk(chunk,"btOptimizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_contiguousNodes[0]); + serializer->finalizeChunk(chunk, "btOptimizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_contiguousNodes[0]); } quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size(); -// printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes); - quantizedData->m_quantizedContiguousNodesPtr =(btQuantizedBvhNodeData*) (m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0); + // printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes); + quantizedData->m_quantizedContiguousNodesPtr = (btQuantizedBvhNodeData*)(m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0); if (quantizedData->m_quantizedContiguousNodesPtr) { int sz = sizeof(btQuantizedBvhNodeData); int numElem = m_quantizedContiguousNodes.size(); - btChunk* chunk = serializer->allocate(sz,numElem); + btChunk* chunk = serializer->allocate(sz, numElem); btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex; memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0]; @@ -1361,20 +1320,20 @@ const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1]; memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2]; } - serializer->finalizeChunk(chunk,"btQuantizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_quantizedContiguousNodes[0]); + serializer->finalizeChunk(chunk, "btQuantizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_quantizedContiguousNodes[0]); } quantizedData->m_traversalMode = int(m_traversalMode); quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size(); - quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*) (m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0); + quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*)(m_SubtreeHeaders.size() ? 
serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0); if (quantizedData->m_subTreeInfoPtr) { int sz = sizeof(btBvhSubtreeInfoData); int numElem = m_SubtreeHeaders.size(); - btChunk* chunk = serializer->allocate(sz,numElem); + btChunk* chunk = serializer->allocate(sz, numElem); btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0]; memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1]; @@ -1386,12 +1345,7 @@ const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex; memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize; } - serializer->finalizeChunk(chunk,"btBvhSubtreeInfoData",BT_ARRAY_CODE,(void*)&m_SubtreeHeaders[0]); + serializer->finalizeChunk(chunk, "btBvhSubtreeInfoData", BT_ARRAY_CODE, (void*)&m_SubtreeHeaders[0]); } return btQuantizedBvhDataName; } - - - - - diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.h index 3dd5ac9bb6..1c47b9ccf2 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.h @@ -22,11 +22,11 @@ class btSerializer; #ifdef DEBUG_CHECK_DEQUANTIZATION #ifdef __SPU__ #define printf spu_printf -#endif //__SPU__ +#endif //__SPU__ #include <stdio.h> #include <stdlib.h> -#endif //DEBUG_CHECK_DEQUANTIZATION +#endif //DEBUG_CHECK_DEQUANTIZATION #include "LinearMath/btVector3.h" #include "LinearMath/btAlignedAllocator.h" @@ -41,13 +41,10 @@ class btSerializer; #define btQuantizedBvhDataName "btQuantizedBvhFloatData" #endif - - //http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrf__m128.asp - //Note: currently we have 16 bytes per quantized node -#define MAX_SUBTREE_SIZE_IN_BYTES 2048 +#define MAX_SUBTREE_SIZE_IN_BYTES 2048 // 10 gives the potential for 1024 parts, with at most 2^21 (2097152) (minus one // actually) triangles each (since the sign bit is reserved @@ -55,15 +52,16 @@ class btSerializer; ///btQuantizedBvhNode is a compressed aabb node, 16 bytes. ///Node can be used for leafnode or internal node. Leafnodes can point to 32-bit triangle index (non-negative range). 
-ATTRIBUTE_ALIGNED16 (struct) btQuantizedBvhNode +ATTRIBUTE_ALIGNED16(struct) +btQuantizedBvhNode { BT_DECLARE_ALIGNED_ALLOCATOR(); //12 bytes - unsigned short int m_quantizedAabbMin[3]; - unsigned short int m_quantizedAabbMax[3]; + unsigned short int m_quantizedAabbMin[3]; + unsigned short int m_quantizedAabbMax[3]; //4 bytes - int m_escapeIndexOrTriangleIndex; + int m_escapeIndexOrTriangleIndex; bool isLeafNode() const { @@ -75,68 +73,67 @@ ATTRIBUTE_ALIGNED16 (struct) btQuantizedBvhNode btAssert(!isLeafNode()); return -m_escapeIndexOrTriangleIndex; } - int getTriangleIndex() const + int getTriangleIndex() const { btAssert(isLeafNode()); - unsigned int x=0; - unsigned int y = (~(x&0))<<(31-MAX_NUM_PARTS_IN_BITS); + unsigned int x = 0; + unsigned int y = (~(x & 0)) << (31 - MAX_NUM_PARTS_IN_BITS); // Get only the lower bits where the triangle index is stored - return (m_escapeIndexOrTriangleIndex&~(y)); + return (m_escapeIndexOrTriangleIndex & ~(y)); } - int getPartId() const + int getPartId() const { btAssert(isLeafNode()); // Get only the highest bits where the part index is stored - return (m_escapeIndexOrTriangleIndex>>(31-MAX_NUM_PARTS_IN_BITS)); + return (m_escapeIndexOrTriangleIndex >> (31 - MAX_NUM_PARTS_IN_BITS)); } -} -; +}; /// btOptimizedBvhNode contains both internal and leaf node information. /// Total node size is 44 bytes / node. You can use the compressed version of 16 bytes. -ATTRIBUTE_ALIGNED16 (struct) btOptimizedBvhNode +ATTRIBUTE_ALIGNED16(struct) +btOptimizedBvhNode { BT_DECLARE_ALIGNED_ALLOCATOR(); //32 bytes - btVector3 m_aabbMinOrg; - btVector3 m_aabbMaxOrg; + btVector3 m_aabbMinOrg; + btVector3 m_aabbMaxOrg; //4 - int m_escapeIndex; + int m_escapeIndex; //8 //for child nodes - int m_subPart; - int m_triangleIndex; + int m_subPart; + int m_triangleIndex; -//pad the size to 64 bytes - char m_padding[20]; + //pad the size to 64 bytes + char m_padding[20]; }; - ///btBvhSubtreeInfo provides info to gather a subtree of limited size -ATTRIBUTE_ALIGNED16(class) btBvhSubtreeInfo +ATTRIBUTE_ALIGNED16(class) +btBvhSubtreeInfo { public: BT_DECLARE_ALIGNED_ALLOCATOR(); //12 bytes - unsigned short int m_quantizedAabbMin[3]; - unsigned short int m_quantizedAabbMax[3]; + unsigned short int m_quantizedAabbMin[3]; + unsigned short int m_quantizedAabbMax[3]; //4 bytes, points to the root of the subtree - int m_rootNodeIndex; + int m_rootNodeIndex; //4 bytes - int m_subtreeSize; - int m_padding[3]; + int m_subtreeSize; + int m_padding[3]; btBvhSubtreeInfo() { //memset(&m_padding[0], 0, sizeof(m_padding)); } - - void setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode) + void setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode) { m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0]; m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1]; @@ -145,14 +142,12 @@ public: m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1]; m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2]; } -} -; - +}; class btNodeOverlapCallback { public: - virtual ~btNodeOverlapCallback() {}; + virtual ~btNodeOverlapCallback(){}; virtual void processNode(int subPart, int triangleIndex) = 0; }; @@ -160,18 +155,16 @@ public: #include "LinearMath/btAlignedAllocator.h" #include "LinearMath/btAlignedObjectArray.h" - - ///for code readability: -typedef btAlignedObjectArray<btOptimizedBvhNode> NodeArray; -typedef btAlignedObjectArray<btQuantizedBvhNode> QuantizedNodeArray; -typedef btAlignedObjectArray<btBvhSubtreeInfo> BvhSubtreeInfoArray; - +typedef 
btAlignedObjectArray<btOptimizedBvhNode> NodeArray; +typedef btAlignedObjectArray<btQuantizedBvhNode> QuantizedNodeArray; +typedef btAlignedObjectArray<btBvhSubtreeInfo> BvhSubtreeInfoArray; ///The btQuantizedBvh class stores an AABB tree that can be quickly traversed on CPU and Cell SPU. ///It is used by the btBvhTriangleMeshShape as midphase. ///It is recommended to use quantization for better performance and lower memory requirements. -ATTRIBUTE_ALIGNED16(class) btQuantizedBvh +ATTRIBUTE_ALIGNED16(class) +btQuantizedBvh { public: enum btTraversalMode @@ -182,54 +175,47 @@ public: }; protected: + btVector3 m_bvhAabbMin; + btVector3 m_bvhAabbMax; + btVector3 m_bvhQuantization; + int m_bulletVersion; //for serialization versioning. It could also be used to detect endianess. - btVector3 m_bvhAabbMin; - btVector3 m_bvhAabbMax; - btVector3 m_bvhQuantization; - - int m_bulletVersion; //for serialization versioning. It could also be used to detect endianess. - - int m_curNodeIndex; + int m_curNodeIndex; //quantization data - bool m_useQuantization; + bool m_useQuantization; + NodeArray m_leafNodes; + NodeArray m_contiguousNodes; + QuantizedNodeArray m_quantizedLeafNodes; + QuantizedNodeArray m_quantizedContiguousNodes; - - NodeArray m_leafNodes; - NodeArray m_contiguousNodes; - QuantizedNodeArray m_quantizedLeafNodes; - QuantizedNodeArray m_quantizedContiguousNodes; - - btTraversalMode m_traversalMode; - BvhSubtreeInfoArray m_SubtreeHeaders; + btTraversalMode m_traversalMode; + BvhSubtreeInfoArray m_SubtreeHeaders; //This is only used for serialization so we don't have to add serialization directly to btAlignedObjectArray mutable int m_subtreeHeaderCount; - - - - ///two versions, one for quantized and normal nodes. This allows code-reuse while maintaining readability (no template/macro!) 
///this might be refactored into a virtual, it is usually not calculated at run-time - void setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin) + void setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin) { if (m_useQuantization) { - quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] ,aabbMin,0); - } else + quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0], aabbMin, 0); + } + else { m_contiguousNodes[nodeIndex].m_aabbMinOrg = aabbMin; - } } - void setInternalNodeAabbMax(int nodeIndex,const btVector3& aabbMax) + void setInternalNodeAabbMax(int nodeIndex, const btVector3& aabbMax) { if (m_useQuantization) { - quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0],aabbMax,1); - } else + quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0], aabbMax, 1); + } + else { m_contiguousNodes[nodeIndex].m_aabbMaxOrg = aabbMax; } @@ -243,115 +229,102 @@ protected: } //non-quantized return m_leafNodes[nodeIndex].m_aabbMinOrg; - } btVector3 getAabbMax(int nodeIndex) const { if (m_useQuantization) { return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMax[0]); - } + } //non-quantized return m_leafNodes[nodeIndex].m_aabbMaxOrg; - } - - void setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex) + void setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex) { if (m_useQuantization) { m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = -escapeIndex; - } + } else { m_contiguousNodes[nodeIndex].m_escapeIndex = escapeIndex; } - } - void mergeInternalNodeAabb(int nodeIndex,const btVector3& newAabbMin,const btVector3& newAabbMax) + void mergeInternalNodeAabb(int nodeIndex, const btVector3& newAabbMin, const btVector3& newAabbMax) { if (m_useQuantization) { unsigned short int quantizedAabbMin[3]; unsigned short int quantizedAabbMax[3]; - quantize(quantizedAabbMin,newAabbMin,0); - quantize(quantizedAabbMax,newAabbMax,1); - for (int i=0;i<3;i++) + quantize(quantizedAabbMin, newAabbMin, 0); + quantize(quantizedAabbMax, newAabbMax, 1); + for (int i = 0; i < 3; i++) { if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] > quantizedAabbMin[i]) m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] = quantizedAabbMin[i]; if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] < quantizedAabbMax[i]) m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] = quantizedAabbMax[i]; - } - } else + } + else { //non-quantized m_contiguousNodes[nodeIndex].m_aabbMinOrg.setMin(newAabbMin); - m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax); + m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax); } } - void swapLeafNodes(int firstIndex,int secondIndex); + void swapLeafNodes(int firstIndex, int secondIndex); - void assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex); + void assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex); protected: + void buildTree(int startIndex, int endIndex); - + int calcSplittingAxis(int startIndex, int endIndex); - void buildTree (int startIndex,int endIndex); + int sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis); - int calcSplittingAxis(int startIndex,int endIndex); + void walkStacklessTree(btNodeOverlapCallback * nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const; - int sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis); - - void walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const; - 
- void walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const; - void walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const; - void walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const; + void walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const; + void walkStacklessQuantizedTree(btNodeOverlapCallback * nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const; + void walkStacklessTreeAgainstRay(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const; ///tree traversal designed for small-memory processors like PS3 SPU - void walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const; + void walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback * nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const; ///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal - void walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const; + void walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode, btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const; ///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal - void walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA,const btQuantizedBvhNode* treeNodeB,btNodeOverlapCallback* nodeCallback) const; - + void walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA, const btQuantizedBvhNode* treeNodeB, btNodeOverlapCallback* nodeCallback) const; - - - void updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex); + void updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex); public: - BT_DECLARE_ALIGNED_ALLOCATOR(); btQuantizedBvh(); virtual ~btQuantizedBvh(); - ///***************************************** expert/internal use only ************************* - void setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin=btScalar(1.0)); - QuantizedNodeArray& getLeafNodeArray() { return m_quantizedLeafNodes; } + void setQuantizationValues(const btVector3& bvhAabbMin, const btVector3& bvhAabbMax, btScalar quantizationMargin = btScalar(1.0)); + QuantizedNodeArray& getLeafNodeArray() { return m_quantizedLeafNodes; } ///buildInternal is expert use only: assumes that setQuantizationValues and LeafNodeArray are initialized - void 
buildInternal(); + void buildInternal(); ///***************************************** expert/internal use only ************************* - void reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const; - void reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const; - void reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const; + void reportAabbOverlappingNodex(btNodeOverlapCallback * nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const; + void reportRayOverlappingNodex(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const; + void reportBoxCastOverlappingNodex(btNodeOverlapCallback * nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax) const; - SIMD_FORCE_INLINE void quantize(unsigned short* out, const btVector3& point,int isMax) const + SIMD_FORCE_INLINE void quantize(unsigned short* out, const btVector3& point, int isMax) const { - btAssert(m_useQuantization); btAssert(point.getX() <= m_bvhAabbMax.getX()); @@ -368,122 +341,114 @@ public: ///@todo: double-check this if (isMax) { - out[0] = (unsigned short) (((unsigned short)(v.getX()+btScalar(1.)) | 1)); - out[1] = (unsigned short) (((unsigned short)(v.getY()+btScalar(1.)) | 1)); - out[2] = (unsigned short) (((unsigned short)(v.getZ()+btScalar(1.)) | 1)); - } else + out[0] = (unsigned short)(((unsigned short)(v.getX() + btScalar(1.)) | 1)); + out[1] = (unsigned short)(((unsigned short)(v.getY() + btScalar(1.)) | 1)); + out[2] = (unsigned short)(((unsigned short)(v.getZ() + btScalar(1.)) | 1)); + } + else { - out[0] = (unsigned short) (((unsigned short)(v.getX()) & 0xfffe)); - out[1] = (unsigned short) (((unsigned short)(v.getY()) & 0xfffe)); - out[2] = (unsigned short) (((unsigned short)(v.getZ()) & 0xfffe)); + out[0] = (unsigned short)(((unsigned short)(v.getX()) & 0xfffe)); + out[1] = (unsigned short)(((unsigned short)(v.getY()) & 0xfffe)); + out[2] = (unsigned short)(((unsigned short)(v.getZ()) & 0xfffe)); } - #ifdef DEBUG_CHECK_DEQUANTIZATION btVector3 newPoint = unQuantize(out); if (isMax) { if (newPoint.getX() < point.getX()) { - printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX()); + printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n", newPoint.getX() - point.getX(), newPoint.getX(), point.getX()); } if (newPoint.getY() < point.getY()) { - printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY()); + printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n", newPoint.getY() - point.getY(), newPoint.getY(), point.getY()); } if (newPoint.getZ() < point.getZ()) { - - printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ()); + printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n", newPoint.getZ() - point.getZ(), newPoint.getZ(), point.getZ()); } - } else + } + else { if (newPoint.getX() > point.getX()) { - printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX()); + printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n", newPoint.getX() - point.getX(), newPoint.getX(), point.getX()); } if (newPoint.getY() > 
point.getY()) { - printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY()); + printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n", newPoint.getY() - point.getY(), newPoint.getY(), point.getY()); } if (newPoint.getZ() > point.getZ()) { - printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ()); + printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n", newPoint.getZ() - point.getZ(), newPoint.getZ(), point.getZ()); } } -#endif //DEBUG_CHECK_DEQUANTIZATION - +#endif //DEBUG_CHECK_DEQUANTIZATION } - - SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const btVector3& point2,int isMax) const + SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const btVector3& point2, int isMax) const { - btAssert(m_useQuantization); btVector3 clampedPoint(point2); clampedPoint.setMax(m_bvhAabbMin); clampedPoint.setMin(m_bvhAabbMax); - quantize(out,clampedPoint,isMax); - + quantize(out, clampedPoint, isMax); } - - SIMD_FORCE_INLINE btVector3 unQuantize(const unsigned short* vecIn) const + + SIMD_FORCE_INLINE btVector3 unQuantize(const unsigned short* vecIn) const { - btVector3 vecOut; - vecOut.setValue( + btVector3 vecOut; + vecOut.setValue( (btScalar)(vecIn[0]) / (m_bvhQuantization.getX()), (btScalar)(vecIn[1]) / (m_bvhQuantization.getY()), (btScalar)(vecIn[2]) / (m_bvhQuantization.getZ())); - vecOut += m_bvhAabbMin; - return vecOut; + vecOut += m_bvhAabbMin; + return vecOut; } ///setTraversalMode let's you choose between stackless, recursive or stackless cache friendly tree traversal. Note this is only implemented for quantized trees. - void setTraversalMode(btTraversalMode traversalMode) + void setTraversalMode(btTraversalMode traversalMode) { m_traversalMode = traversalMode; } - - SIMD_FORCE_INLINE QuantizedNodeArray& getQuantizedNodeArray() - { - return m_quantizedContiguousNodes; + SIMD_FORCE_INLINE QuantizedNodeArray& getQuantizedNodeArray() + { + return m_quantizedContiguousNodes; } - - SIMD_FORCE_INLINE BvhSubtreeInfoArray& getSubtreeInfoArray() + SIMD_FORCE_INLINE BvhSubtreeInfoArray& getSubtreeInfoArray() { return m_SubtreeHeaders; } -//////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////// /////Calculate space needed to store BVH for serialization unsigned calculateSerializeBufferSize() const; /// Data buffer MUST be 16 byte aligned - virtual bool serialize(void *o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const; + virtual bool serialize(void* o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const; ///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place' - static btQuantizedBvh *deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian); + static btQuantizedBvh* deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian); static unsigned int getAlignmentSerializationPadding(); -////////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////////// - - virtual int calculateSerializeBufferSizeNew() const; + virtual int calculateSerializeBufferSizeNew() const; ///fills the dataBuffer and returns the struct name (and 0 on failure) - virtual const char* serialize(void* dataBuffer, btSerializer* serializer) const; - - virtual void deSerializeFloat(struct 
btQuantizedBvhFloatData& quantizedBvhFloatData); + virtual const char* serialize(void* dataBuffer, btSerializer* serializer) const; - virtual void deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData); + virtual void deSerializeFloat(struct btQuantizedBvhFloatData & quantizedBvhFloatData); + virtual void deSerializeDouble(struct btQuantizedBvhDoubleData & quantizedBvhDoubleData); -//////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////// SIMD_FORCE_INLINE bool isQuantized() { @@ -494,38 +459,37 @@ private: // Special "copy" constructor that allows for in-place deserialization // Prevents btVector3's default constructor from being called, but doesn't inialize much else // ownsMemory should most likely be false if deserializing, and if you are not, don't call this (it also changes the function signature, which we need) - btQuantizedBvh(btQuantizedBvh &other, bool ownsMemory); - -} -; - + btQuantizedBvh(btQuantizedBvh & other, bool ownsMemory); +}; -struct btBvhSubtreeInfoData +// clang-format off +// parser needs * with the name +struct btBvhSubtreeInfoData { - int m_rootNodeIndex; - int m_subtreeSize; + int m_rootNodeIndex; + int m_subtreeSize; unsigned short m_quantizedAabbMin[3]; unsigned short m_quantizedAabbMax[3]; }; struct btOptimizedBvhNodeFloatData { - btVector3FloatData m_aabbMinOrg; - btVector3FloatData m_aabbMaxOrg; - int m_escapeIndex; - int m_subPart; - int m_triangleIndex; + btVector3FloatData m_aabbMinOrg; + btVector3FloatData m_aabbMaxOrg; + int m_escapeIndex; + int m_subPart; + int m_triangleIndex; char m_pad[4]; }; struct btOptimizedBvhNodeDoubleData { - btVector3DoubleData m_aabbMinOrg; - btVector3DoubleData m_aabbMaxOrg; - int m_escapeIndex; - int m_subPart; - int m_triangleIndex; - char m_pad[4]; + btVector3DoubleData m_aabbMinOrg; + btVector3DoubleData m_aabbMaxOrg; + int m_escapeIndex; + int m_subPart; + int m_triangleIndex; + char m_pad[4]; }; @@ -569,13 +533,11 @@ struct btQuantizedBvhDoubleData int m_numSubtreeHeaders; btBvhSubtreeInfoData *m_subTreeInfoPtr; }; +// clang-format on - -SIMD_FORCE_INLINE int btQuantizedBvh::calculateSerializeBufferSizeNew() const +SIMD_FORCE_INLINE int btQuantizedBvh::calculateSerializeBufferSizeNew() const { return sizeof(btQuantizedBvhData); } - - -#endif //BT_QUANTIZED_BVH_H +#endif //BT_QUANTIZED_BVH_H diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.cpp index 5f89f960e8..b7fe0a1f34 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.cpp @@ -24,50 +24,45 @@ subject to the following restrictions: #include <new> -void btSimpleBroadphase::validate() +void btSimpleBroadphase::validate() { - for (int i=0;i<m_numHandles;i++) + for (int i = 0; i < m_numHandles; i++) { - for (int j=i+1;j<m_numHandles;j++) + for (int j = i + 1; j < m_numHandles; j++) { btAssert(&m_pHandles[i] != &m_pHandles[j]); } } - } btSimpleBroadphase::btSimpleBroadphase(int maxProxies, btOverlappingPairCache* overlappingPairCache) - :m_pairCache(overlappingPairCache), - m_ownsPairCache(false), - m_invalidPair(0) + : m_pairCache(overlappingPairCache), + m_ownsPairCache(false), + m_invalidPair(0) { - if (!overlappingPairCache) { - void* mem = btAlignedAlloc(sizeof(btHashedOverlappingPairCache),16); - m_pairCache = new 
(mem)btHashedOverlappingPairCache(); + void* mem = btAlignedAlloc(sizeof(btHashedOverlappingPairCache), 16); + m_pairCache = new (mem) btHashedOverlappingPairCache(); m_ownsPairCache = true; } // allocate handles buffer and put all handles on free list - m_pHandlesRawPtr = btAlignedAlloc(sizeof(btSimpleBroadphaseProxy)*maxProxies,16); - m_pHandles = new(m_pHandlesRawPtr) btSimpleBroadphaseProxy[maxProxies]; + m_pHandlesRawPtr = btAlignedAlloc(sizeof(btSimpleBroadphaseProxy) * maxProxies, 16); + m_pHandles = new (m_pHandlesRawPtr) btSimpleBroadphaseProxy[maxProxies]; m_maxHandles = maxProxies; m_numHandles = 0; m_firstFreeHandle = 0; m_LastHandleIndex = -1; - { for (int i = m_firstFreeHandle; i < maxProxies; i++) { m_pHandles[i].SetNextFree(i + 1); - m_pHandles[i].m_uniqueId = i+2;//any UID will do, we just avoid too trivial values (0,1) for debugging purposes + m_pHandles[i].m_uniqueId = i + 2; //any UID will do, we just avoid too trivial values (0,1) for debugging purposes } m_pHandles[maxProxies - 1].SetNextFree(0); - } - } btSimpleBroadphase::~btSimpleBroadphase() @@ -81,26 +76,25 @@ btSimpleBroadphase::~btSimpleBroadphase() } } - -btBroadphaseProxy* btSimpleBroadphase::createProxy( const btVector3& aabbMin, const btVector3& aabbMax,int shapeType,void* userPtr , int collisionFilterGroup, int collisionFilterMask, btDispatcher* /*dispatcher*/) +btBroadphaseProxy* btSimpleBroadphase::createProxy(const btVector3& aabbMin, const btVector3& aabbMax, int shapeType, void* userPtr, int collisionFilterGroup, int collisionFilterMask, btDispatcher* /*dispatcher*/) { if (m_numHandles >= m_maxHandles) { btAssert(0); - return 0; //should never happen, but don't let the game crash ;-) + return 0; //should never happen, but don't let the game crash ;-) } - btAssert(aabbMin[0]<= aabbMax[0] && aabbMin[1]<= aabbMax[1] && aabbMin[2]<= aabbMax[2]); + btAssert(aabbMin[0] <= aabbMax[0] && aabbMin[1] <= aabbMax[1] && aabbMin[2] <= aabbMax[2]); int newHandleIndex = allocHandle(); - btSimpleBroadphaseProxy* proxy = new (&m_pHandles[newHandleIndex])btSimpleBroadphaseProxy(aabbMin,aabbMax,shapeType,userPtr,collisionFilterGroup,collisionFilterMask); + btSimpleBroadphaseProxy* proxy = new (&m_pHandles[newHandleIndex]) btSimpleBroadphaseProxy(aabbMin, aabbMax, shapeType, userPtr, collisionFilterGroup, collisionFilterMask); return proxy; } -class RemovingOverlapCallback : public btOverlapCallback +class RemovingOverlapCallback : public btOverlapCallback { protected: - virtual bool processOverlap(btBroadphasePair& pair) + virtual bool processOverlap(btBroadphasePair& pair) { (void)pair; btAssert(0); @@ -110,12 +104,13 @@ protected: class RemovePairContainingProxy { + btBroadphaseProxy* m_targetProxy; - btBroadphaseProxy* m_targetProxy; - public: +public: virtual ~RemovePairContainingProxy() { } + protected: virtual bool processOverlap(btBroadphasePair& pair) { @@ -126,38 +121,36 @@ protected: }; }; -void btSimpleBroadphase::destroyProxy(btBroadphaseProxy* proxyOrg,btDispatcher* dispatcher) +void btSimpleBroadphase::destroyProxy(btBroadphaseProxy* proxyOrg, btDispatcher* dispatcher) { - - btSimpleBroadphaseProxy* proxy0 = static_cast<btSimpleBroadphaseProxy*>(proxyOrg); - freeHandle(proxy0); + m_pairCache->removeOverlappingPairsContainingProxy(proxyOrg, dispatcher); - m_pairCache->removeOverlappingPairsContainingProxy(proxyOrg,dispatcher); + btSimpleBroadphaseProxy* proxy0 = static_cast<btSimpleBroadphaseProxy*>(proxyOrg); + freeHandle(proxy0); - //validate(); - + //validate(); } -void 
btSimpleBroadphase::getAabb(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const +void btSimpleBroadphase::getAabb(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const { const btSimpleBroadphaseProxy* sbp = getSimpleProxyFromProxy(proxy); aabbMin = sbp->m_aabbMin; aabbMax = sbp->m_aabbMax; } -void btSimpleBroadphase::setAabb(btBroadphaseProxy* proxy,const btVector3& aabbMin,const btVector3& aabbMax, btDispatcher* /*dispatcher*/) +void btSimpleBroadphase::setAabb(btBroadphaseProxy* proxy, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* /*dispatcher*/) { btSimpleBroadphaseProxy* sbp = getSimpleProxyFromProxy(proxy); sbp->m_aabbMin = aabbMin; sbp->m_aabbMax = aabbMax; } -void btSimpleBroadphase::rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin,const btVector3& aabbMax) +void btSimpleBroadphase::rayTest(const btVector3& rayFrom, const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin, const btVector3& aabbMax) { - for (int i=0; i <= m_LastHandleIndex; i++) + for (int i = 0; i <= m_LastHandleIndex; i++) { btSimpleBroadphaseProxy* proxy = &m_pHandles[i]; - if(!proxy->m_clientObject) + if (!proxy->m_clientObject) { continue; } @@ -165,69 +158,59 @@ void btSimpleBroadphase::rayTest(const btVector3& rayFrom,const btVector3& rayTo } } - -void btSimpleBroadphase::aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback) +void btSimpleBroadphase::aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback) { - for (int i=0; i <= m_LastHandleIndex; i++) + for (int i = 0; i <= m_LastHandleIndex; i++) { btSimpleBroadphaseProxy* proxy = &m_pHandles[i]; - if(!proxy->m_clientObject) + if (!proxy->m_clientObject) { continue; } - if (TestAabbAgainstAabb2(aabbMin,aabbMax,proxy->m_aabbMin,proxy->m_aabbMax)) + if (TestAabbAgainstAabb2(aabbMin, aabbMax, proxy->m_aabbMin, proxy->m_aabbMax)) { callback.process(proxy); } } } - - - - - - -bool btSimpleBroadphase::aabbOverlap(btSimpleBroadphaseProxy* proxy0,btSimpleBroadphaseProxy* proxy1) +bool btSimpleBroadphase::aabbOverlap(btSimpleBroadphaseProxy* proxy0, btSimpleBroadphaseProxy* proxy1) { - return proxy0->m_aabbMin[0] <= proxy1->m_aabbMax[0] && proxy1->m_aabbMin[0] <= proxy0->m_aabbMax[0] && + return proxy0->m_aabbMin[0] <= proxy1->m_aabbMax[0] && proxy1->m_aabbMin[0] <= proxy0->m_aabbMax[0] && proxy0->m_aabbMin[1] <= proxy1->m_aabbMax[1] && proxy1->m_aabbMin[1] <= proxy0->m_aabbMax[1] && proxy0->m_aabbMin[2] <= proxy1->m_aabbMax[2] && proxy1->m_aabbMin[2] <= proxy0->m_aabbMax[2]; - } - - //then remove non-overlapping ones class CheckOverlapCallback : public btOverlapCallback { public: virtual bool processOverlap(btBroadphasePair& pair) { - return (!btSimpleBroadphase::aabbOverlap(static_cast<btSimpleBroadphaseProxy*>(pair.m_pProxy0),static_cast<btSimpleBroadphaseProxy*>(pair.m_pProxy1))); + return (!btSimpleBroadphase::aabbOverlap(static_cast<btSimpleBroadphaseProxy*>(pair.m_pProxy0), static_cast<btSimpleBroadphaseProxy*>(pair.m_pProxy1))); } }; -void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) +void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) { //first check for new overlapping pairs - int i,j; + int i, j; if (m_numHandles >= 0) { int new_largest_index = -1; - for (i=0; i <= m_LastHandleIndex; i++) + for (i = 0; i <= m_LastHandleIndex; i++) { btSimpleBroadphaseProxy* proxy0 = 
&m_pHandles[i]; - if(!proxy0->m_clientObject) + if (!proxy0->m_clientObject) { continue; } new_largest_index = i; - for (j=i+1; j <= m_LastHandleIndex; j++) + for (j = i + 1; j <= m_LastHandleIndex; j++) { btSimpleBroadphaseProxy* proxy1 = &m_pHandles[j]; btAssert(proxy0 != proxy1); - if(!proxy1->m_clientObject) + if (!proxy1->m_clientObject) { continue; } @@ -235,19 +218,20 @@ void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) btSimpleBroadphaseProxy* p0 = getSimpleProxyFromProxy(proxy0); btSimpleBroadphaseProxy* p1 = getSimpleProxyFromProxy(proxy1); - if (aabbOverlap(p0,p1)) + if (aabbOverlap(p0, p1)) { - if ( !m_pairCache->findPair(proxy0,proxy1)) + if (!m_pairCache->findPair(proxy0, proxy1)) { - m_pairCache->addOverlappingPair(proxy0,proxy1); + m_pairCache->addOverlappingPair(proxy0, proxy1); } - } else + } + else { if (!m_pairCache->hasDeferredRemoval()) { - if ( m_pairCache->findPair(proxy0,proxy1)) + if (m_pairCache->findPair(proxy0, proxy1)) { - m_pairCache->removeOverlappingPair(proxy0,proxy1,dispatcher); + m_pairCache->removeOverlappingPair(proxy0, proxy1, dispatcher); } } } @@ -258,8 +242,7 @@ void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) if (m_ownsPairCache && m_pairCache->hasDeferredRemoval()) { - - btBroadphasePairArray& overlappingPairArray = m_pairCache->getOverlappingPairArray(); + btBroadphasePairArray& overlappingPairArray = m_pairCache->getOverlappingPairArray(); //perform a sort, to find duplicates and to sort 'invalid' pairs to the end overlappingPairArray.quickSort(btBroadphasePairSortPredicate()); @@ -267,16 +250,13 @@ void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) overlappingPairArray.resize(overlappingPairArray.size() - m_invalidPair); m_invalidPair = 0; - btBroadphasePair previousPair; previousPair.m_pProxy0 = 0; previousPair.m_pProxy1 = 0; previousPair.m_algorithm = 0; - - for (i=0;i<overlappingPairArray.size();i++) + for (i = 0; i < overlappingPairArray.size(); i++) { - btBroadphasePair& pair = overlappingPairArray[i]; bool isDuplicate = (pair == previousPair); @@ -287,16 +267,18 @@ void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) if (!isDuplicate) { - bool hasOverlap = testAabbOverlap(pair.m_pProxy0,pair.m_pProxy1); + bool hasOverlap = testAabbOverlap(pair.m_pProxy0, pair.m_pProxy1); if (hasOverlap) { - needsRemoval = false;//callback->processOverlap(pair); - } else + needsRemoval = false; //callback->processOverlap(pair); + } + else { needsRemoval = true; } - } else + } + else { //remove duplicate needsRemoval = true; @@ -306,7 +288,7 @@ void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) if (needsRemoval) { - m_pairCache->cleanOverlappingPair(pair,dispatcher); + m_pairCache->cleanOverlappingPair(pair, dispatcher); // m_overlappingPairArray.swap(i,m_overlappingPairArray.size()-1); // m_overlappingPairArray.pop_back(); @@ -314,7 +296,6 @@ void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) pair.m_pProxy1 = 0; m_invalidPair++; } - } ///if you don't like to skip the invalid pairs in the array, execute following code: @@ -326,21 +307,19 @@ void btSimpleBroadphase::calculateOverlappingPairs(btDispatcher* dispatcher) overlappingPairArray.resize(overlappingPairArray.size() - m_invalidPair); m_invalidPair = 0; -#endif//CLEAN_INVALID_PAIRS - +#endif //CLEAN_INVALID_PAIRS } } } - -bool btSimpleBroadphase::testAabbOverlap(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1) +bool 
btSimpleBroadphase::testAabbOverlap(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1) { btSimpleBroadphaseProxy* p0 = getSimpleProxyFromProxy(proxy0); btSimpleBroadphaseProxy* p1 = getSimpleProxyFromProxy(proxy1); - return aabbOverlap(p0,p1); + return aabbOverlap(p0, p1); } -void btSimpleBroadphase::resetPool(btDispatcher* dispatcher) +void btSimpleBroadphase::resetPool(btDispatcher* dispatcher) { //not yet } diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.h b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.h index d7a18e400a..3e02fdc003 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.h +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btSimpleBroadphase.h @@ -16,57 +16,47 @@ subject to the following restrictions: #ifndef BT_SIMPLE_BROADPHASE_H #define BT_SIMPLE_BROADPHASE_H - #include "btOverlappingPairCache.h" - struct btSimpleBroadphaseProxy : public btBroadphaseProxy { - int m_nextFree; - -// int m_handleId; + int m_nextFree; - - btSimpleBroadphaseProxy() {}; + // int m_handleId; - btSimpleBroadphaseProxy(const btVector3& minpt,const btVector3& maxpt,int shapeType,void* userPtr, int collisionFilterGroup, int collisionFilterMask) - :btBroadphaseProxy(minpt,maxpt,userPtr,collisionFilterGroup,collisionFilterMask) + btSimpleBroadphaseProxy(){}; + + btSimpleBroadphaseProxy(const btVector3& minpt, const btVector3& maxpt, int shapeType, void* userPtr, int collisionFilterGroup, int collisionFilterMask) + : btBroadphaseProxy(minpt, maxpt, userPtr, collisionFilterGroup, collisionFilterMask) { (void)shapeType; } - - - SIMD_FORCE_INLINE void SetNextFree(int next) {m_nextFree = next;} - SIMD_FORCE_INLINE int GetNextFree() const {return m_nextFree;} - - - + SIMD_FORCE_INLINE void SetNextFree(int next) { m_nextFree = next; } + SIMD_FORCE_INLINE int GetNextFree() const { return m_nextFree; } }; ///The SimpleBroadphase is just a unit-test for btAxisSweep3, bt32BitAxisSweep3, or btDbvtBroadphase, so use those classes instead. 
///It is a brute force aabb culling broadphase based on O(n^2) aabb checks class btSimpleBroadphase : public btBroadphaseInterface { - protected: + int m_numHandles; // number of active handles + int m_maxHandles; // max number of handles + int m_LastHandleIndex; - int m_numHandles; // number of active handles - int m_maxHandles; // max number of handles - int m_LastHandleIndex; - - btSimpleBroadphaseProxy* m_pHandles; // handles pool + btSimpleBroadphaseProxy* m_pHandles; // handles pool void* m_pHandlesRawPtr; - int m_firstFreeHandle; // free handles list - + int m_firstFreeHandle; // free handles list + int allocHandle() { btAssert(m_numHandles < m_maxHandles); int freeHandle = m_firstFreeHandle; m_firstFreeHandle = m_pHandles[freeHandle].GetNextFree(); m_numHandles++; - if(freeHandle > m_LastHandleIndex) + if (freeHandle > m_LastHandleIndex) { m_LastHandleIndex = freeHandle; } @@ -75,9 +65,9 @@ protected: void freeHandle(btSimpleBroadphaseProxy* proxy) { - int handle = int(proxy-m_pHandles); + int handle = int(proxy - m_pHandles); btAssert(handle >= 0 && handle < m_maxHandles); - if(handle == m_LastHandleIndex) + if (handle == m_LastHandleIndex) { m_LastHandleIndex--; } @@ -89,20 +79,18 @@ protected: m_numHandles--; } - btOverlappingPairCache* m_pairCache; - bool m_ownsPairCache; + btOverlappingPairCache* m_pairCache; + bool m_ownsPairCache; - int m_invalidPair; + int m_invalidPair; - - - inline btSimpleBroadphaseProxy* getSimpleProxyFromProxy(btBroadphaseProxy* proxy) + inline btSimpleBroadphaseProxy* getSimpleProxyFromProxy(btBroadphaseProxy* proxy) { btSimpleBroadphaseProxy* proxy0 = static_cast<btSimpleBroadphaseProxy*>(proxy); return proxy0; } - inline const btSimpleBroadphaseProxy* getSimpleProxyFromProxy(btBroadphaseProxy* proxy) const + inline const btSimpleBroadphaseProxy* getSimpleProxyFromProxy(btBroadphaseProxy* proxy) const { const btSimpleBroadphaseProxy* proxy0 = static_cast<const btSimpleBroadphaseProxy*>(proxy); return proxy0; @@ -111,61 +99,50 @@ protected: ///reset broadphase internal structures, to ensure determinism/reproducability virtual void resetPool(btDispatcher* dispatcher); - - void validate(); + void validate(); protected: - - - - public: - btSimpleBroadphase(int maxProxies=16384,btOverlappingPairCache* overlappingPairCache=0); + btSimpleBroadphase(int maxProxies = 16384, btOverlappingPairCache* overlappingPairCache = 0); virtual ~btSimpleBroadphase(); + static bool aabbOverlap(btSimpleBroadphaseProxy* proxy0, btSimpleBroadphaseProxy* proxy1); - static bool aabbOverlap(btSimpleBroadphaseProxy* proxy0,btSimpleBroadphaseProxy* proxy1); - + virtual btBroadphaseProxy* createProxy(const btVector3& aabbMin, const btVector3& aabbMax, int shapeType, void* userPtr, int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher); - virtual btBroadphaseProxy* createProxy( const btVector3& aabbMin, const btVector3& aabbMax,int shapeType,void* userPtr , int collisionFilterGroup, int collisionFilterMask, btDispatcher* dispatcher); + virtual void calculateOverlappingPairs(btDispatcher* dispatcher); - virtual void calculateOverlappingPairs(btDispatcher* dispatcher); + virtual void destroyProxy(btBroadphaseProxy* proxy, btDispatcher* dispatcher); + virtual void setAabb(btBroadphaseProxy* proxy, const btVector3& aabbMin, const btVector3& aabbMax, btDispatcher* dispatcher); + virtual void getAabb(btBroadphaseProxy* proxy, btVector3& aabbMin, btVector3& aabbMax) const; - virtual void destroyProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher); - virtual void 
setAabb(btBroadphaseProxy* proxy,const btVector3& aabbMin,const btVector3& aabbMax, btDispatcher* dispatcher); - virtual void getAabb(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const; + virtual void rayTest(const btVector3& rayFrom, const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin = btVector3(0, 0, 0), const btVector3& aabbMax = btVector3(0, 0, 0)); + virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback); - virtual void rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin=btVector3(0,0,0),const btVector3& aabbMax=btVector3(0,0,0)); - virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback); - - btOverlappingPairCache* getOverlappingPairCache() + btOverlappingPairCache* getOverlappingPairCache() { return m_pairCache; } - const btOverlappingPairCache* getOverlappingPairCache() const + const btOverlappingPairCache* getOverlappingPairCache() const { return m_pairCache; } - bool testAabbOverlap(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1); - + bool testAabbOverlap(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1); ///getAabb returns the axis aligned bounding box in the 'global' coordinate frame ///will add some transform later - virtual void getBroadphaseAabb(btVector3& aabbMin,btVector3& aabbMax) const + virtual void getBroadphaseAabb(btVector3& aabbMin, btVector3& aabbMax) const { - aabbMin.setValue(-BT_LARGE_FLOAT,-BT_LARGE_FLOAT,-BT_LARGE_FLOAT); - aabbMax.setValue(BT_LARGE_FLOAT,BT_LARGE_FLOAT,BT_LARGE_FLOAT); + aabbMin.setValue(-BT_LARGE_FLOAT, -BT_LARGE_FLOAT, -BT_LARGE_FLOAT); + aabbMax.setValue(BT_LARGE_FLOAT, BT_LARGE_FLOAT, BT_LARGE_FLOAT); } - virtual void printStats() + virtual void printStats() { -// printf("btSimpleBroadphase.h\n"); -// printf("numHandles = %d, maxHandles = %d\n",m_numHandles,m_maxHandles); + // printf("btSimpleBroadphase.h\n"); + // printf("numHandles = %d, maxHandles = %d\n",m_numHandles,m_maxHandles); } }; - - -#endif //BT_SIMPLE_BROADPHASE_H - +#endif //BT_SIMPLE_BROADPHASE_H |
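
For readers skimming the reformatted code above, the quantization helpers in btQuantizedBvh are the easiest part to misread: quantizeWithClamp() clamps a point into [m_bvhAabbMin, m_bvhAabbMax] before quantizing, unQuantize() maps a 16-bit triple back by dividing by m_bvhQuantization and adding m_bvhAabbMin, and the DEBUG_CHECK_DEQUANTIZATION block warns whenever that round trip is not conservative. Below is a minimal standalone C++ sketch of such a round trip; the 65535 scale factor and the floor/ceil rounding are assumptions made for the sketch (Bullet's own quantize() is not shown in these hunks and uses its own rounding scheme), and none of these names belong to the Bullet API.

    // Standalone illustration of a conservative 16-bit AABB quantization round trip.
    // All names and the 65535 scale are assumptions for this sketch, not Bullet API.
    #include <cmath>
    #include <cstdio>

    static const float kAabbMin[3] = {-10.0f, -10.0f, -10.0f};
    static const float kAabbMax[3] = {10.0f, 10.0f, 10.0f};

    // Scale so that the whole BVH AABB maps onto the 16-bit range
    // (playing the role of m_bvhQuantization in this sketch).
    static float scaleFor(int axis)
    {
        return 65535.0f / (kAabbMax[axis] - kAabbMin[axis]);
    }

    // Clamp into the BVH bounds first (as quantizeWithClamp does), then round
    // min edges down and max edges up so the quantized box stays conservative.
    static unsigned short quantizeAxis(float p, int axis, bool isMax)
    {
        if (p < kAabbMin[axis]) p = kAabbMin[axis];
        if (p > kAabbMax[axis]) p = kAabbMax[axis];
        float q = (p - kAabbMin[axis]) * scaleFor(axis);
        return (unsigned short)(isMax ? std::ceil(q) : std::floor(q));
    }

    // Same shape as unQuantize(): divide by the scale, add the AABB minimum back.
    static float unquantizeAxis(unsigned short q, int axis)
    {
        return (float)q / scaleFor(axis) + kAabbMin[axis];
    }

    int main()
    {
        const float x = 1.2345f;
        const unsigned short qMin = quantizeAxis(x, 0, false);
        const unsigned short qMax = quantizeAxis(x, 0, true);
        // The dequantized interval must bracket the original value; this is the
        // property the DEBUG_CHECK_DEQUANTIZATION printfs check for.
        std::printf("x=%f  dequantized: min edge=%f  max edge=%f\n",
                    x, unquantizeAxis(qMin, 0), unquantizeAxis(qMax, 0));
        return 0;
    }

The other half of the diff is btSimpleBroadphase, whose calculateOverlappingPairs() simply tests every live handle against every other one with aabbOverlap(), an interval-overlap check on each of the three axes. A standalone sketch of that O(n^2) scan, again with invented names:

    // Standalone illustration of the brute-force overlap test; names are invented
    // for this sketch and are not part of the Bullet API.
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct SimpleAabb
    {
        float min[3];
        float max[3];
    };

    // Per-axis interval test, mirroring the logic of btSimpleBroadphase::aabbOverlap():
    // two boxes overlap only if their intervals overlap on all three axes.
    static bool overlaps(const SimpleAabb& a, const SimpleAabb& b)
    {
        return a.min[0] <= b.max[0] && b.min[0] <= a.max[0] &&
               a.min[1] <= b.max[1] && b.min[1] <= a.max[1] &&
               a.min[2] <= b.max[2] && b.min[2] <= a.max[2];
    }

    // O(n^2) pair generation, the same loop shape calculateOverlappingPairs() runs
    // over the handle pool before it hands pairs to the overlapping-pair cache.
    static std::vector<std::pair<int, int> > bruteForcePairs(const std::vector<SimpleAabb>& boxes)
    {
        std::vector<std::pair<int, int> > pairs;
        for (int i = 0; i < (int)boxes.size(); i++)
            for (int j = i + 1; j < (int)boxes.size(); j++)
                if (overlaps(boxes[i], boxes[j]))
                    pairs.push_back(std::make_pair(i, j));
        return pairs;
    }

    int main()
    {
        std::vector<SimpleAabb> boxes;
        boxes.push_back(SimpleAabb{{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}});
        boxes.push_back(SimpleAabb{{0.5f, 0.5f, 0.5f}, {2.0f, 2.0f, 2.0f}});  // overlaps box 0
        boxes.push_back(SimpleAabb{{5.0f, 5.0f, 5.0f}, {6.0f, 6.0f, 6.0f}});  // overlaps nothing

        const std::vector<std::pair<int, int> > pairs = bruteForcePairs(boxes);
        for (int k = 0; k < (int)pairs.size(); k++)
            std::printf("overlapping pair: (%d, %d)\n", pairs[k].first, pairs[k].second);
        return 0;
    }

That quadratic pair search is why the class comment in btSimpleBroadphase.h points users at btAxisSweep3, bt32BitAxisSweep3, or btDbvtBroadphase for real workloads; the brute-force version is intended as a reference and unit-test broadphase only.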