use size_t instead of int, for allocator
added hashtable based PairManager, thanks Pierre Terdiman and Erin Catto
improved friction in 'cachefriendly' solver
moved 'refreshcontactpoints' into collision detection, instead of solver
avoid linear search for contact manifolds, by storing an index
ignore margin for sphere shape (its entire radius is already the margin)
avoid alignment checks in BVH serialization, they don't compile on 64-bit architectures (a portable alternative is sketched below)
made 'bomb' box heavier
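For reference, here is a minimal sketch (not part of this commit; the helper names are hypothetical) of how the alignment checks could be kept in a form that compiles on 64-bit architectures: cast the pointer through uintptr_t rather than unsigned, so the value is not truncated.

#include <cstdint>	// uintptr_t

// True when 'p' sits on an 'alignment'-byte boundary (alignment must be a power of two).
inline bool isAligned(const void *p, uintptr_t alignment)
{
	return (reinterpret_cast<uintptr_t>(p) & (alignment - 1)) == 0;
}

// Number of padding bytes needed to advance 'p' to the next 'alignment'-byte boundary.
inline uintptr_t alignmentPadding(const void *p, uintptr_t alignment)
{
	return (alignment - (reinterpret_cast<uintptr_t>(p) & (alignment - 1))) & (alignment - 1);
}

// Hypothetical use inside serialize()/deSerializeInPlace():
//   if (o_alignedDataBuffer == NULL || !isAligned(o_alignedDataBuffer, BVH_ALIGNMENT)) return false;
//   nodeData += alignmentPadding(nodeData, BVH_ALIGNMENT);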
@@ -906,12 +906,13 @@ bool btOptimizedBvh::serialize(void *o_alignedDataBuffer, unsigned i_dataBufferS
-	assert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
+	m_subtreeHeaderCount = m_SubtreeHeaders.size();
 
-	if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
+/*	if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
 	{
 		///check alignedment for buffer?
 		btAssert(0);
 		return false;
 	}
+*/
 
 	btOptimizedBvh *targetBvh = (btOptimizedBvh *)o_alignedDataBuffer;
 
@@ -946,7 +947,7 @@ bool btOptimizedBvh::serialize(void *o_alignedDataBuffer, unsigned i_dataBufferS
 	unsigned char *nodeData = (unsigned char *)targetBvh;
 	nodeData += sizeof(btOptimizedBvh);
 
-	unsigned sizeToAdd = (BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
+	unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
 	nodeData += sizeToAdd;
 
 	int nodeCount = m_curNodeIndex;
@@ -1021,7 +1022,7 @@ bool btOptimizedBvh::serialize(void *o_alignedDataBuffer, unsigned i_dataBufferS
 		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
 	}
 
-	sizeToAdd = (BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
+	sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
 	nodeData += sizeToAdd;
 
 	// Now serialize the subtree headers
@@ -1068,7 +1069,7 @@ bool btOptimizedBvh::serialize(void *o_alignedDataBuffer, unsigned i_dataBufferS
 btOptimizedBvh *btOptimizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian)
 {
 
-	if (i_alignedDataBuffer == NULL || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
+	if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
 	{
 		return NULL;
 	}
@@ -1097,7 +1098,7 @@ btOptimizedBvh *btOptimizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un
 	unsigned char *nodeData = (unsigned char *)bvh;
 	nodeData += sizeof(btOptimizedBvh);
 
-	unsigned sizeToAdd = (BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
+	unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
 	nodeData += sizeToAdd;
 
 	int nodeCount = bvh->m_curNodeIndex;
@@ -1146,7 +1147,7 @@ btOptimizedBvh *btOptimizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un
 		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
 	}
 
-	sizeToAdd = (BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
+	sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
 	nodeData += sizeToAdd;
 
 	// Now serialize the subtree headers
 
@@ -42,7 +42,7 @@ public:
 
 	virtual int getShapeType() const { return SPHERE_SHAPE_PROXYTYPE; }
 
-	btScalar getRadius() const { return m_implicitShapeDimensions.getX();}
+	btScalar getRadius() const { return m_implicitShapeDimensions.getX() * m_localScaling.getX();}
 
 	//debugging
 	virtual char* getName()const {return "SPHERE";}
@@ -55,7 +55,7 @@ public:
 	{
 		//to improve gjk behaviour, use radius+margin as the full margin, so never get into the penetration case
 		//this means, non-uniform scaling is not supported anymore
-		return m_localScaling.getX() * getRadius() + btConvexInternalShape::getMargin();
+		return getRadius();
 	}
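Below is a small usage sketch (not from the commit) of the resulting btSphereShape behaviour: getRadius() now folds the uniform local scaling into the radius, and getMargin() simply returns that full radius, so no extra collision margin is stacked on top of the sphere.

#include "btBulletCollisionCommon.h"

// Sketch only: demonstrates the post-commit sphere radius/margin relationship.
void sphereMarginExample()
{
	btSphereShape sphere(btScalar(0.5));          // constructed with radius 0.5
	sphere.setLocalScaling(btVector3(2, 2, 2));   // only uniform scaling is supported

	btScalar r = sphere.getRadius();   // 0.5 * 2.0 = 1.0, scaling is folded into the radius
	btScalar m = sphere.getMargin();   // equal to the radius: the whole sphere acts as margin
	btAssert(r == m);
}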