a better approach to merge new manifolds on multithreaded CPU dispatcher
@@ -28,6 +28,7 @@ subject to the following restrictions:
 btCollisionDispatcherMt::btCollisionDispatcherMt(btCollisionConfiguration* config, int grainSize)
     : btCollisionDispatcher(config)
 {
+    m_batchManifoldsPtr.resize(btGetTaskScheduler()->getNumThreads());
     m_batchUpdating = false;
     m_grainSize = grainSize; // iterations per task
 }
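
With this change the constructor sizes one per-thread manifold buffer per worker of the active task scheduler, so the scheduler should be installed before the dispatcher is created. A minimal setup sketch, assuming the usual Bullet helpers (btSetTaskScheduler/btCreateDefaultTaskScheduler from LinearMath/btThreads.h and the stock btDefaultCollisionConfiguration); the helper function and the grain size value below are illustrative, not part of this commit:

#include "btBulletCollisionCommon.h"
#include "LinearMath/btThreads.h"
#include "BulletCollision/CollisionDispatch/btCollisionDispatcherMt.h"

btCollisionDispatcherMt* createMtDispatcher()  // helper name is illustrative
{
    // Install a task scheduler first: the constructor above calls
    // btGetTaskScheduler()->getNumThreads() to size m_batchManifoldsPtr,
    // one manifold buffer per worker thread.
    btSetTaskScheduler(btCreateDefaultTaskScheduler());

    btDefaultCollisionConfiguration* config = new btDefaultCollisionConfiguration();
    // 40 pairs per task is a commonly used grain size; tune it per scene.
    return new btCollisionDispatcherMt(config, 40);
}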
@@ -65,6 +66,10 @@ btPersistentManifold* btCollisionDispatcherMt::getNewManifold(const btCollisionO
         manifold->m_index1a = m_manifoldsPtr.size();
         m_manifoldsPtr.push_back(manifold);
     }
+    else
+    {
+        m_batchManifoldsPtr[btGetCurrentThreadIndex()].push_back(manifold);
+    }

     return manifold;
 }
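
In getNewManifold, a manifold created while the batch update is running is now appended to the calling thread's own array, selected by btGetCurrentThreadIndex(), instead of being left out of the array and recovered later by re-walking every pair's collision algorithm. Each worker writes only to its own pre-sized slot, so this path needs no lock. A stripped-down sketch of that write path, with generic stand-in types rather than Bullet's:

#include <vector>

struct Manifold {};  // stand-in for btPersistentManifold

// One buffer per worker thread, sized once before the parallel phase starts
// (the constructor change above does this with getNumThreads()).
std::vector<std::vector<Manifold*> > g_batchManifolds;

// Worker-side path: threadIndex plays the role of btGetCurrentThreadIndex().
// Each call touches only g_batchManifolds[threadIndex], so concurrent calls
// from different worker threads never write to the same vector.
void recordNewManifold(Manifold* manifold, int threadIndex)
{
    g_batchManifolds[threadIndex].push_back(manifold);
}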
@@ -121,7 +126,7 @@ struct CollisionDispatcherUpdater : public btIParallelForBody

 void btCollisionDispatcherMt::dispatchAllCollisionPairs(btOverlappingPairCache* pairCache, const btDispatcherInfo& info, btDispatcher* dispatcher)
 {
-    int pairCount = pairCache->getNumOverlappingPairs();
+    const int pairCount = pairCache->getNumOverlappingPairs();
     if (pairCount == 0)
     {
         return;
@@ -136,16 +141,17 @@ void btCollisionDispatcherMt::dispatchAllCollisionPairs(btOverlappingPairCache*
     btParallelFor(0, pairCount, m_grainSize, updater);
     m_batchUpdating = false;

-    // reconstruct the manifolds array to ensure determinism
-    m_manifoldsPtr.resizeNoInitialize(0);
-
-    btBroadphasePair* pairs = pairCache->getOverlappingPairArrayPtr();
-    for (int i = 0; i < pairCount; ++i)
-    {
-        if (btCollisionAlgorithm* algo = pairs[i].m_algorithm)
-        {
-            algo->getAllContactManifolds(m_manifoldsPtr);
-        }
-    }
+    // merge new manifolds, if any
+    for (int i = 0; i < m_batchManifoldsPtr.size(); ++i)
+    {
+        btAlignedObjectArray<btPersistentManifold*>& batchManifoldsPtr = m_batchManifoldsPtr[i];
+
+        for (int j = 0; j < batchManifoldsPtr.size(); ++j)
+        {
+            m_manifoldsPtr.push_back(batchManifoldsPtr[j]);
+        }
+
+        batchManifoldsPtr.resizeNoInitialize(0);
+    }

     // update the indices (used when releasing manifolds)
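
The merge replaces the old rebuild pass: rather than clearing m_manifoldsPtr and re-walking every broadphase pair's algorithm with getAllContactManifolds, the main thread appends each thread's batch and empties it, and the trailing context comment indicates the cached indices are then refreshed so manifolds can still be released by index. A simplified sketch of that merge-and-reindex step, using plain std::vector stand-ins rather than Bullet's types:

#include <vector>

struct Manifold { int m_index1a = -1; };  // stand-in for btPersistentManifold

void mergeBatches(std::vector<std::vector<Manifold*> >& batches,
                  std::vector<Manifold*>& manifolds)
{
    // merge new manifolds, if any
    for (std::vector<Manifold*>& batch : batches)
    {
        manifolds.insert(manifolds.end(), batch.begin(), batch.end());
        batch.clear();  // Bullet keeps the capacity via resizeNoInitialize(0)
    }

    // update the indices (used when releasing manifolds)
    for (int i = 0; i < (int)manifolds.size(); ++i)
    {
        manifolds[i]->m_index1a = i;
    }
}

The remaining hunk below adds the matching per-thread member, m_batchManifoldsPtr, to the class declaration.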
@@ -30,6 +30,7 @@ public:
     virtual void dispatchAllCollisionPairs(btOverlappingPairCache* pairCache, const btDispatcherInfo& info, btDispatcher* dispatcher) BT_OVERRIDE;

 protected:
+    btAlignedObjectArray<btAlignedObjectArray<btPersistentManifold*>> m_batchManifoldsPtr;
     bool m_batchUpdating;
     int m_grainSize;
 };
|