Merge pull request #1144 from lunkhound/pr-threading-refactor

Bullet 2 threading refactor: moved parallel-for calls into core libs
Authored by erwincoumans on 2017-06-02 16:18:12 -07:00; committed by GitHub.
20 changed files with 1550 additions and 858 deletions

View File

@@ -28,7 +28,15 @@ OPTION(USE_DOUBLE_PRECISION "Use double precision" OFF)
 OPTION(USE_GRAPHICAL_BENCHMARK "Use Graphical Benchmark" ON)
 OPTION(BUILD_SHARED_LIBS "Use shared libraries" OFF)
 OPTION(USE_SOFT_BODY_MULTI_BODY_DYNAMICS_WORLD "Use btSoftMultiBodyDynamicsWorld" OFF)
-OPTION(BULLET2_USE_THREAD_LOCKS "Build Bullet 2 libraries with mutex locking around certain operations" OFF)
+OPTION(BULLET2_USE_THREAD_LOCKS "Build Bullet 2 libraries with mutex locking around certain operations (required for multi-threading)" OFF)
+IF (BULLET2_USE_THREAD_LOCKS)
+    OPTION(BULLET2_USE_OPEN_MP_MULTITHREADING "Build Bullet 2 with support for multi-threading with OpenMP (requires a compiler with OpenMP support)" OFF)
+    OPTION(BULLET2_USE_TBB_MULTITHREADING "Build Bullet 2 with support for multi-threading with Intel Threading Building Blocks (requires the TBB library to be already installed)" OFF)
+    IF (MSVC)
+        OPTION(BULLET2_USE_PPL_MULTITHREADING "Build Bullet 2 with support for multi-threading with Microsoft Parallel Patterns Library (requires MSVC compiler)" OFF)
+    ENDIF (MSVC)
+ENDIF (BULLET2_USE_THREAD_LOCKS)
 OPTION(USE_MSVC_INCREMENTAL_LINKING "Use MSVC Incremental Linking" OFF)
 OPTION(USE_CUSTOM_VECTOR_MATH "Use custom vectormath library" OFF)

@@ -208,6 +216,30 @@ IF(BULLET2_USE_THREAD_LOCKS)
 ENDIF (NOT MSVC)
 ENDIF (BULLET2_USE_THREAD_LOCKS)
+IF (BULLET2_USE_OPEN_MP_MULTITHREADING)
+    ADD_DEFINITIONS("-DBT_USE_OPENMP=1")
+    IF (MSVC)
+        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /openmp")
+    ELSE (MSVC)
+        # GCC, Clang
+        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
+    ENDIF (MSVC)
+ENDIF (BULLET2_USE_OPEN_MP_MULTITHREADING)
+IF (BULLET2_USE_TBB_MULTITHREADING)
+    SET (BULLET2_TBB_INCLUDE_DIR "not found" CACHE PATH "Directory for Intel TBB includes.")
+    SET (BULLET2_TBB_LIB_DIR "not found" CACHE PATH "Directory for Intel TBB libraries.")
+    find_library(TBB_LIBRARY tbb PATHS ${BULLET2_TBB_LIB_DIR})
+    find_library(TBBMALLOC_LIBRARY tbbmalloc PATHS ${BULLET2_TBB_LIB_DIR})
+    ADD_DEFINITIONS("-DBT_USE_TBB=1")
+    INCLUDE_DIRECTORIES( ${BULLET2_TBB_INCLUDE_DIR} )
+    LINK_LIBRARIES( ${TBB_LIBRARY} ${TBBMALLOC_LIBRARY} )
+ENDIF (BULLET2_USE_TBB_MULTITHREADING)
+IF (BULLET2_USE_PPL_MULTITHREADING)
+    ADD_DEFINITIONS("-DBT_USE_PPL=1")
+ENDIF (BULLET2_USE_PPL_MULTITHREADING)
 IF (WIN32)
 OPTION(USE_GLUT "Use Glut" ON)
 ADD_DEFINITIONS( -D_CRT_SECURE_NO_WARNINGS )

@@ -350,14 +382,6 @@ IF(BUILD_BULLET2_DEMOS)
 SUBDIRS(examples)
 ENDIF()
-IF (BULLET2_USE_THREAD_LOCKS)
-    OPTION(BULLET2_MULTITHREADED_OPEN_MP_DEMO "Build Bullet 2 MultithreadedDemo using OpenMP (requires a compiler with OpenMP support)" OFF)
-    OPTION(BULLET2_MULTITHREADED_TBB_DEMO "Build Bullet 2 MultithreadedDemo using Intel Threading Building Blocks (requires the TBB library to be already installed)" OFF)
-    IF (MSVC)
-        OPTION(BULLET2_MULTITHREADED_PPL_DEMO "Build Bullet 2 MultithreadedDemo using Microsoft Parallel Patterns Library (requires MSVC compiler)" OFF)
-    ENDIF (MSVC)
-ENDIF (BULLET2_USE_THREAD_LOCKS)
 ENDIF(BUILD_BULLET2_DEMOS)
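With the demo-local BULLET2_MULTITHREADED_*_DEMO options gone, threading is configured once here: each BULLET2_USE_*_MULTITHREADING option adds the matching BT_USE_* define, and the scheduler getters in LinearMath/btThreads.h return a scheduler only for the backends that were compiled in (NULL otherwise). A minimal runtime-selection sketch; the helper name and fallback order are illustrative, not part of this commit:

```cpp
#include "LinearMath/btThreads.h"

// Illustrative helper (not from this PR): pick the first scheduler this
// build supports. Each getter returns NULL unless the corresponding CMake
// option (and thus BT_USE_OPENMP / BT_USE_TBB / BT_USE_PPL) was enabled.
static btITaskScheduler* pickTaskScheduler()
{
    btITaskScheduler* ts = btGetOpenMPTaskScheduler();
    if (!ts) ts = btGetTBBTaskScheduler();
    if (!ts) ts = btGetPPLTaskScheduler();
    if (!ts) ts = btGetSequentialTaskScheduler(); // always available
    return ts;
}
```

Calling btSetTaskScheduler(pickTaskScheduler()) once at startup then routes every btParallelFor in the core libs to the chosen backend.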

View File

@@ -32,7 +32,6 @@ subject to the following restrictions:
#include "LinearMath/btAlignedObjectArray.h" #include "LinearMath/btAlignedObjectArray.h"
#include "LinearMath/btTransform.h" #include "LinearMath/btTransform.h"
#include "../MultiThreadedDemo/ParallelFor.h"
class btDynamicsWorld; class btDynamicsWorld;
@@ -230,7 +229,7 @@ public:
} }
} }
struct CastRaysLoopBody struct CastRaysLoopBody : public btIParallelForBody
{ {
btCollisionWorld* mWorld; btCollisionWorld* mWorld;
btRaycastBar2* mRaycasts; btRaycastBar2* mRaycasts;
@@ -274,7 +273,7 @@ public:
{ {
CastRaysLoopBody rayLooper(cw, this); CastRaysLoopBody rayLooper(cw, this);
int grainSize = 20; // number of raycasts per task int grainSize = 20; // number of raycasts per task
parallelFor( 0, NUMRAYS, grainSize, rayLooper ); btParallelFor( 0, NUMRAYS, grainSize, rayLooper );
} }
else else
#endif // USE_PARALLEL_RAYCASTS #endif // USE_PARALLEL_RAYCASTS
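This is the whole migration pattern for demo code: derive the loop body from btIParallelForBody (now in LinearMath/btThreads.h), keep forLoop over a half-open index range, and call btParallelFor instead of the removed demo-local parallelFor. A self-contained sketch, assuming a scheduler has already been installed with btSetTaskScheduler; the body type and data here are hypothetical:

```cpp
#include "LinearMath/btThreads.h"

// Hypothetical loop body: scales an array of floats in parallel.
struct ScaleLoopBody : public btIParallelForBody
{
    float* mValues;
    float mFactor;
    // each task is handed a disjoint half-open range [iBegin, iEnd)
    void forLoop(int iBegin, int iEnd) const BT_OVERRIDE
    {
        for (int i = iBegin; i < iEnd; ++i)
            mValues[i] *= mFactor;
    }
};

void scaleAll(float* values, int count)
{
    ScaleLoopBody body;
    body.mValues = values;
    body.mFactor = 2.0f;
    int grainSize = 64; // iterations per task, tuned like the raycast grain above
    btParallelFor(0, count, grainSize, body);
}
```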

View File

@@ -110,29 +110,6 @@ ELSE(WIN32)
 ENDIF(APPLE)
 ENDIF(WIN32)
-IF (BULLET2_MULTITHREADED_OPEN_MP_DEMO)
-    ADD_DEFINITIONS("-DBT_USE_OPENMP=1")
-    IF (MSVC)
-        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /openmp")
-    ELSE (MSVC)
-        # GCC, Clang
-        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
-    ENDIF (MSVC)
-ENDIF (BULLET2_MULTITHREADED_OPEN_MP_DEMO)
-IF (BULLET2_MULTITHREADED_PPL_DEMO)
-    ADD_DEFINITIONS("-DBT_USE_PPL=1")
-ENDIF (BULLET2_MULTITHREADED_PPL_DEMO)
-IF (BULLET2_MULTITHREADED_TBB_DEMO)
-    SET (BULLET2_TBB_INCLUDE_DIR "not found" CACHE PATH "Directory for Intel TBB includes.")
-    SET (BULLET2_TBB_LIB_DIR "not found" CACHE PATH "Directory for Intel TBB libraries.")
-    find_library(TBB_LIBRARY tbb PATHS ${BULLET2_TBB_LIB_DIR})
-    find_library(TBBMALLOC_LIBRARY tbbmalloc PATHS ${BULLET2_TBB_LIB_DIR})
-    ADD_DEFINITIONS("-DBT_USE_TBB=1")
-    INCLUDE_DIRECTORIES( ${BULLET2_TBB_INCLUDE_DIR} )
-    LINK_LIBRARIES( ${TBB_LIBRARY} ${TBBMALLOC_LIBRARY} )
-ENDIF (BULLET2_MULTITHREADED_TBB_DEMO)
 SET(ExtendedTutorialsSources
     ../ExtendedTutorials/Chain.cpp

@@ -208,7 +185,6 @@ SET(BulletExampleBrowser_SRCS
     ../MultiThreadedDemo/MultiThreadedDemo.h
     ../MultiThreadedDemo/CommonRigidBodyMTBase.cpp
     ../MultiThreadedDemo/CommonRigidBodyMTBase.h
-    ../MultiThreadedDemo/ParallelFor.h
     ../Tutorial/Tutorial.cpp
     ../Tutorial/Tutorial.h
     ../Tutorial/Dof6ConstraintTutorial.cpp

@@ -245,6 +221,7 @@ SET(BulletExampleBrowser_SRCS
     ../MultiThreading/b3PosixThreadSupport.cpp
     ../MultiThreading/b3Win32ThreadSupport.cpp
     ../MultiThreading/b3ThreadSupportInterface.cpp
+    ../MultiThreading/btTaskScheduler.cpp
     ../RenderingExamples/TinyRendererSetup.cpp
     ../RenderingExamples/TimeSeriesCanvas.cpp
     ../RenderingExamples/TimeSeriesCanvas.h

@@ -387,7 +364,7 @@ ADD_CUSTOM_COMMAND(
     COMMAND ${CMAKE_COMMAND} ARGS -E copy_directory ${BULLET_PHYSICS_SOURCE_DIR}/data ${PROJECT_BINARY_DIR}/data
 )
-IF (BULLET2_MULTITHREADED_TBB_DEMO AND WIN32)
+IF (BULLET2_USE_TBB_MULTITHREADING AND WIN32)
     # add a post build command to copy some dlls to the executable directory
     set(TBB_VC_VER "vc12")
     set(TBB_VC_ARCH "ia32")

@@ -401,7 +378,7 @@ IF (BULLET2_MULTITHREADED_TBB_DEMO AND WIN32)
     COMMAND ${CMAKE_COMMAND} -E copy_if_different
     "${BULLET2_TBB_INCLUDE_DIR}/../bin/${TBB_VC_ARCH}/${TBB_VC_VER}/tbbmalloc.dll"
     $<TARGET_FILE_DIR:App_ExampleBrowser>)
-ENDIF (BULLET2_MULTITHREADED_TBB_DEMO AND WIN32)
+ENDIF (BULLET2_USE_TBB_MULTITHREADING AND WIN32)
 IF (INTERNAL_ADD_POSTFIX_EXECUTABLE_NAMES)

View File

@@ -295,7 +295,7 @@ static ExampleEntry gDefaultExamples[]=
ExampleEntry(1,"Fracture demo", "Create a basic custom implementation to model fracturing objects, based on a btCompoundShape. It explicitly propagates the collision impulses and breaks the rigid body into multiple rigid bodies. Press F to toggle fracture and glue mode.", FractureDemoCreateFunc), ExampleEntry(1,"Fracture demo", "Create a basic custom implementation to model fracturing objects, based on a btCompoundShape. It explicitly propagates the collision impulses and breaks the rigid body into multiple rigid bodies. Press F to toggle fracture and glue mode.", FractureDemoCreateFunc),
ExampleEntry(1,"Planar 2D","Show the use of 2D collision shapes and rigid body simulation. The collision shape is wrapped into a btConvex2dShape. The rigid bodies are restricted in a plane using the 'setAngularFactor' and 'setLinearFactor' API call.",Planar2DCreateFunc), ExampleEntry(1,"Planar 2D","Show the use of 2D collision shapes and rigid body simulation. The collision shape is wrapped into a btConvex2dShape. The rigid bodies are restricted in a plane using the 'setAngularFactor' and 'setLinearFactor' API call.",Planar2DCreateFunc),
#if BT_USE_OPENMP || BT_USE_TBB || BT_USE_PPL #if BT_THREADSAFE
// only enable MultiThreaded demo if a task scheduler is available // only enable MultiThreaded demo if a task scheduler is available
ExampleEntry( 1, "Multithreaded Demo", ExampleEntry( 1, "Multithreaded Demo",
"Stacks of boxes that do not sleep. Good for testing performance with large numbers of bodies and contacts. Sliders can be used to change the number of stacks (restart needed after each change)." "Stacks of boxes that do not sleep. Good for testing performance with large numbers of bodies and contacts. Sliders can be used to change the number of stacks (restart needed after each change)."

View File

@@ -23,10 +23,10 @@ class btCollisionShape;
#include "CommonRigidBodyMTBase.h" #include "CommonRigidBodyMTBase.h"
#include "../CommonInterfaces/CommonParameterInterface.h" #include "../CommonInterfaces/CommonParameterInterface.h"
#include "ParallelFor.h"
#include "LinearMath/btAlignedObjectArray.h" #include "LinearMath/btAlignedObjectArray.h"
#include "LinearMath/btPoolAllocator.h" #include "LinearMath/btPoolAllocator.h"
#include "btBulletCollisionCommon.h" #include "btBulletCollisionCommon.h"
#include "BulletCollision/CollisionDispatch/btCollisionDispatcherMt.h"
#include "BulletDynamics/Dynamics/btSimulationIslandManagerMt.h" // for setSplitIslands() #include "BulletDynamics/Dynamics/btSimulationIslandManagerMt.h" // for setSplitIslands()
#include "BulletDynamics/Dynamics/btDiscreteDynamicsWorldMt.h" #include "BulletDynamics/Dynamics/btDiscreteDynamicsWorldMt.h"
#include "BulletDynamics/ConstraintSolver/btSequentialImpulseConstraintSolver.h" #include "BulletDynamics/ConstraintSolver/btSequentialImpulseConstraintSolver.h"
@@ -35,21 +35,8 @@ class btCollisionShape;
#include "BulletDynamics/MLCPSolvers/btSolveProjectedGaussSeidel.h" #include "BulletDynamics/MLCPSolvers/btSolveProjectedGaussSeidel.h"
#include "BulletDynamics/MLCPSolvers/btDantzigSolver.h" #include "BulletDynamics/MLCPSolvers/btDantzigSolver.h"
#include "BulletDynamics/MLCPSolvers/btLemkeSolver.h" #include "BulletDynamics/MLCPSolvers/btLemkeSolver.h"
#include "../MultiThreading/btTaskScheduler.h"
TaskManager gTaskMgr;
#define USE_PARALLEL_NARROWPHASE 1 // detect collisions in parallel
#define USE_PARALLEL_ISLAND_SOLVER 1 // solve simulation islands in parallel
#define USE_PARALLEL_CREATE_PREDICTIVE_CONTACTS 1
#define USE_PARALLEL_INTEGRATE_TRANSFORMS 1
#define USE_PARALLEL_PREDICT_UNCONSTRAINED_MOTION 1
#if defined (_MSC_VER) && _MSC_VER >= 1600
// give us a compile error if any signatures of overriden methods is changed
#define BT_OVERRIDE override
#else
#define BT_OVERRIDE
#endif
static int gNumIslands = 0; static int gNumIslands = 0;
@@ -124,7 +111,7 @@ public:
}; };
Profiler gProfiler; static Profiler gProfiler;
class ProfileHelper class ProfileHelper
{ {
@@ -141,457 +128,84 @@ public:
     }
 };
-int gThreadsRunningCounter = 0;
-btSpinMutex gThreadsRunningCounterMutex;
-void btPushThreadsAreRunning()
-{
-    gThreadsRunningCounterMutex.lock();
-    gThreadsRunningCounter++;
-    gThreadsRunningCounterMutex.unlock();
-}
-void btPopThreadsAreRunning()
-{
-    gThreadsRunningCounterMutex.lock();
-    gThreadsRunningCounter--;
-    gThreadsRunningCounterMutex.unlock();
-}
-bool btThreadsAreRunning()
-{
-    return gThreadsRunningCounter != 0;
-}
+static void profileBeginCallback( btDynamicsWorld *world, btScalar timeStep )
+{
+    gProfiler.begin( Profiler::kRecordInternalTimeStep );
+}
+static void profileEndCallback( btDynamicsWorld *world, btScalar timeStep )
+{
+    gProfiler.end( Profiler::kRecordInternalTimeStep );
+}
-#if USE_PARALLEL_NARROWPHASE
-class MyCollisionDispatcher : public btCollisionDispatcher
-{
-    btSpinMutex m_manifoldPtrsMutex;
-public:
-    MyCollisionDispatcher( btCollisionConfiguration* config ) : btCollisionDispatcher( config )
-    {
-    }
-    virtual ~MyCollisionDispatcher()
-    {
-    }
-    btPersistentManifold* getNewManifold( const btCollisionObject* body0, const btCollisionObject* body1 ) BT_OVERRIDE
-    {
-        // added spin-locks
-        //optional relative contact breaking threshold, turned on by default (use setDispatcherFlags to switch off feature for improved performance)
-        btScalar contactBreakingThreshold = ( m_dispatcherFlags & btCollisionDispatcher::CD_USE_RELATIVE_CONTACT_BREAKING_THRESHOLD ) ?
-            btMin( body0->getCollisionShape()->getContactBreakingThreshold( gContactBreakingThreshold ), body1->getCollisionShape()->getContactBreakingThreshold( gContactBreakingThreshold ) )
-            : gContactBreakingThreshold;
-        btScalar contactProcessingThreshold = btMin( body0->getContactProcessingThreshold(), body1->getContactProcessingThreshold() );
-        void* mem = m_persistentManifoldPoolAllocator->allocate( sizeof( btPersistentManifold ) );
-        if (NULL == mem)
-        {
-            //we got a pool memory overflow, by default we fallback to dynamically allocate memory. If we require a contiguous contact pool then assert.
-            if ( ( m_dispatcherFlags&CD_DISABLE_CONTACTPOOL_DYNAMIC_ALLOCATION ) == 0 )
-            {
-                mem = btAlignedAlloc( sizeof( btPersistentManifold ), 16 );
-            }
-            else
-            {
-                btAssert( 0 );
-                //make sure to increase the m_defaultMaxPersistentManifoldPoolSize in the btDefaultCollisionConstructionInfo/btDefaultCollisionConfiguration
-                return 0;
-            }
-        }
-        btPersistentManifold* manifold = new(mem) btPersistentManifold( body0, body1, 0, contactBreakingThreshold, contactProcessingThreshold );
-        m_manifoldPtrsMutex.lock();
-        manifold->m_index1a = m_manifoldsPtr.size();
-        m_manifoldsPtr.push_back( manifold );
-        m_manifoldPtrsMutex.unlock();
-        return manifold;
-    }
-    void releaseManifold( btPersistentManifold* manifold ) BT_OVERRIDE
-    {
-        clearManifold( manifold );
-        m_manifoldPtrsMutex.lock();
-        int findIndex = manifold->m_index1a;
-        btAssert( findIndex < m_manifoldsPtr.size() );
-        m_manifoldsPtr.swap( findIndex, m_manifoldsPtr.size() - 1 );
-        m_manifoldsPtr[ findIndex ]->m_index1a = findIndex;
-        m_manifoldsPtr.pop_back();
-        m_manifoldPtrsMutex.unlock();
-        manifold->~btPersistentManifold();
-        if ( m_persistentManifoldPoolAllocator->validPtr( manifold ) )
-        {
-            m_persistentManifoldPoolAllocator->freeMemory( manifold );
-        }
-        else
-        {
-            btAlignedFree( manifold );
-        }
-    }
-    struct Updater
-    {
-        btBroadphasePair* mPairArray;
-        btNearCallback mCallback;
-        btCollisionDispatcher* mDispatcher;
-        const btDispatcherInfo* mInfo;
-        Updater()
-        {
-            mPairArray = NULL;
-            mCallback = NULL;
-            mDispatcher = NULL;
-            mInfo = NULL;
-        }
-        void forLoop( int iBegin, int iEnd ) const
-        {
-            for ( int i = iBegin; i < iEnd; ++i )
-            {
-                btBroadphasePair* pair = &mPairArray[ i ];
-                mCallback( *pair, *mDispatcher, *mInfo );
-            }
-        }
-    };
-    virtual void dispatchAllCollisionPairs( btOverlappingPairCache* pairCache, const btDispatcherInfo& info, btDispatcher* dispatcher ) BT_OVERRIDE
-    {
-        ProfileHelper prof(Profiler::kRecordDispatchAllCollisionPairs);
-        int grainSize = 40; // iterations per task
-        int pairCount = pairCache->getNumOverlappingPairs();
-        Updater updater;
-        updater.mCallback = getNearCallback();
-        updater.mPairArray = pairCount > 0 ? pairCache->getOverlappingPairArrayPtr() : NULL;
-        updater.mDispatcher = this;
-        updater.mInfo = &info;
-        btPushThreadsAreRunning();
-        parallelFor( 0, pairCount, grainSize, updater );
-        btPopThreadsAreRunning();
-        if (m_manifoldsPtr.size() < 1)
-            return;
-        // reconstruct the manifolds array to ensure determinism
-        m_manifoldsPtr.resizeNoInitialize(0);
-        btBroadphasePair* pairs = pairCache->getOverlappingPairArrayPtr();
-        for (int i = 0; i < pairCount; ++i)
-        {
-            btCollisionAlgorithm* algo = pairs[i].m_algorithm;
-            if (algo) algo->getAllContactManifolds(m_manifoldsPtr);
-        }
-        // update the indices (used when releasing manifolds)
-        for (int i = 0; i < m_manifoldsPtr.size(); ++i)
-            m_manifoldsPtr[i]->m_index1a = i;
-    }
-};
-#endif
+///
+/// MyCollisionDispatcher -- subclassed for profiling purposes
+///
+class MyCollisionDispatcher : public btCollisionDispatcherMt
+{
+    typedef btCollisionDispatcherMt ParentClass;
+public:
+    MyCollisionDispatcher( btCollisionConfiguration* config, int grainSize ) : btCollisionDispatcherMt( config, grainSize )
+    {
+    }
+    virtual void dispatchAllCollisionPairs( btOverlappingPairCache* pairCache, const btDispatcherInfo& info, btDispatcher* dispatcher ) BT_OVERRIDE
+    {
+        ProfileHelper prof( Profiler::kRecordDispatchAllCollisionPairs );
+        ParentClass::dispatchAllCollisionPairs( pairCache, info, dispatcher );
+    }
+};
-#if USE_PARALLEL_ISLAND_SOLVER
-///
-/// MyConstraintSolverPool - masquerades as a constraint solver, but really it is a threadsafe pool of them.
-///
-/// Each solver in the pool is protected by a mutex. When solveGroup is called from a thread,
-/// the pool looks for a solver that isn't being used by another thread, locks it, and dispatches the
-/// call to the solver.
-/// So long as there are at least as many solvers as there are hardware threads, it should never need to
-/// spin wait.
-///
-class MyConstraintSolverPool : public btConstraintSolver
-{
-    const static size_t kCacheLineSize = 128;
-    struct ThreadSolver
-    {
-        btConstraintSolver* solver;
-        btSpinMutex mutex;
-        char _cachelinePadding[ kCacheLineSize - sizeof( btSpinMutex ) - sizeof( void* ) ]; // keep mutexes from sharing a cache line
-    };
-    btAlignedObjectArray<ThreadSolver> m_solvers;
-    btConstraintSolverType m_solverType;
-    ThreadSolver* getAndLockThreadSolver()
-    {
-        while ( true )
-        {
-            for ( int i = 0; i < m_solvers.size(); ++i )
-            {
-                ThreadSolver& solver = m_solvers[ i ];
-                if ( solver.mutex.tryLock() )
-                {
-                    return &solver;
-                }
-            }
-        }
-        return NULL;
-    }
-    void init( btConstraintSolver** solvers, int numSolvers )
-    {
-        m_solverType = BT_SEQUENTIAL_IMPULSE_SOLVER;
-        m_solvers.resize( numSolvers );
-        for ( int i = 0; i < numSolvers; ++i )
-        {
-            m_solvers[ i ].solver = solvers[ i ];
-        }
-        if ( numSolvers > 0 )
-        {
-            m_solverType = solvers[ 0 ]->getSolverType();
-        }
-    }
-public:
-    // create the solvers for me
-    explicit MyConstraintSolverPool( int numSolvers )
-    {
-        btAlignedObjectArray<btConstraintSolver*> solvers;
-        solvers.reserve( numSolvers );
-        for ( int i = 0; i < numSolvers; ++i )
-        {
-            btConstraintSolver* solver = new btSequentialImpulseConstraintSolver();
-            solvers.push_back( solver );
-        }
-        init( &solvers[ 0 ], numSolvers );
-    }
-    // pass in fully constructed solvers (destructor will delete them)
-    MyConstraintSolverPool( btConstraintSolver** solvers, int numSolvers )
-    {
-        init( solvers, numSolvers );
-    }
-    virtual ~MyConstraintSolverPool()
-    {
-        // delete all solvers
-        for ( int i = 0; i < m_solvers.size(); ++i )
-        {
-            ThreadSolver& solver = m_solvers[ i ];
-            delete solver.solver;
-            solver.solver = NULL;
-        }
-    }
-    //virtual void prepareSolve( int /* numBodies */, int /* numManifolds */ ) { ; } // does nothing
-    ///solve a group of constraints
-    virtual btScalar solveGroup( btCollisionObject** bodies,
-        int numBodies,
-        btPersistentManifold** manifolds,
-        int numManifolds,
-        btTypedConstraint** constraints,
-        int numConstraints,
-        const btContactSolverInfo& info,
-        btIDebugDraw* debugDrawer,
-        btDispatcher* dispatcher
-    )
-    {
-        ThreadSolver* solver = getAndLockThreadSolver();
-        solver->solver->solveGroup( bodies, numBodies, manifolds, numManifolds, constraints, numConstraints, info, debugDrawer, dispatcher );
-        solver->mutex.unlock();
-        return 0.0f;
-    }
-    //virtual void allSolved( const btContactSolverInfo& /* info */, class btIDebugDraw* /* debugDrawer */ ) { ; } // does nothing
-    ///clear internal cached data and reset random seed
-    virtual void reset()
-    {
-        for ( int i = 0; i < m_solvers.size(); ++i )
-        {
-            ThreadSolver& solver = m_solvers[ i ];
-            solver.mutex.lock();
-            solver.solver->reset();
-            solver.mutex.unlock();
-        }
-    }
-    virtual btConstraintSolverType getSolverType() const
-    {
-        return m_solverType;
-    }
-};
-struct UpdateIslandDispatcher
-{
-    btAlignedObjectArray<btSimulationIslandManagerMt::Island*>* islandsPtr;
-    btSimulationIslandManagerMt::IslandCallback* callback;
-    void forLoop( int iBegin, int iEnd ) const
-    {
-        for ( int i = iBegin; i < iEnd; ++i )
-        {
-            btSimulationIslandManagerMt::Island* island = ( *islandsPtr )[ i ];
-            btPersistentManifold** manifolds = island->manifoldArray.size() ? &island->manifoldArray[ 0 ] : NULL;
-            btTypedConstraint** constraintsPtr = island->constraintArray.size() ? &island->constraintArray[ 0 ] : NULL;
-            callback->processIsland( &island->bodyArray[ 0 ],
-                island->bodyArray.size(),
-                manifolds,
-                island->manifoldArray.size(),
-                constraintsPtr,
-                island->constraintArray.size(),
-                island->id
-            );
-        }
-    }
-};
-void parallelIslandDispatch( btAlignedObjectArray<btSimulationIslandManagerMt::Island*>* islandsPtr, btSimulationIslandManagerMt::IslandCallback* callback )
-{
-    ProfileHelper prof(Profiler::kRecordDispatchIslands);
-    gNumIslands = islandsPtr->size();
-    int grainSize = 1; // iterations per task
-    UpdateIslandDispatcher dispatcher;
-    dispatcher.islandsPtr = islandsPtr;
-    dispatcher.callback = callback;
-    btPushThreadsAreRunning();
-    parallelFor( 0, islandsPtr->size(), grainSize, dispatcher );
-    btPopThreadsAreRunning();
-}
-#endif //#if USE_PARALLEL_ISLAND_SOLVER
-void profileBeginCallback(btDynamicsWorld *world, btScalar timeStep)
-{
-    gProfiler.begin(Profiler::kRecordInternalTimeStep);
-}
-void profileEndCallback(btDynamicsWorld *world, btScalar timeStep)
-{
-    gProfiler.end(Profiler::kRecordInternalTimeStep);
-}
+///
+/// myParallelIslandDispatch -- wrap default parallel dispatch for profiling and to get the number of simulation islands
+//
+void myParallelIslandDispatch( btAlignedObjectArray<btSimulationIslandManagerMt::Island*>* islandsPtr, btSimulationIslandManagerMt::IslandCallback* callback )
+{
+    ProfileHelper prof( Profiler::kRecordDispatchIslands );
+    gNumIslands = islandsPtr->size();
+    btSimulationIslandManagerMt::parallelIslandDispatch( islandsPtr, callback );
+}
 ///
-/// MyDiscreteDynamicsWorld
-///
-/// Should function exactly like btDiscreteDynamicsWorld.
-/// 3 methods that iterate over all of the rigidbodies can run in parallel:
-///  - predictUnconstraintMotion
-///  - integrateTransforms
-///  - createPredictiveContacts
+/// MyDiscreteDynamicsWorld -- subclassed for profiling purposes
 ///
 ATTRIBUTE_ALIGNED16( class ) MyDiscreteDynamicsWorld : public btDiscreteDynamicsWorldMt
 {
-    typedef btDiscreteDynamicsWorld ParentClass;
+    typedef btDiscreteDynamicsWorldMt ParentClass;
 protected:
-#if USE_PARALLEL_PREDICT_UNCONSTRAINED_MOTION
-    struct UpdaterUnconstrainedMotion
-    {
-        btScalar timeStep;
-        btRigidBody** rigidBodies;
-        void forLoop( int iBegin, int iEnd ) const
-        {
-            for ( int i = iBegin; i < iEnd; ++i )
-            {
-                btRigidBody* body = rigidBodies[ i ];
-                if ( !body->isStaticOrKinematicObject() )
-                {
-                    //don't integrate/update velocities here, it happens in the constraint solver
-                    body->applyDamping( timeStep );
-                    body->predictIntegratedTransform( timeStep, body->getInterpolationWorldTransform() );
-                }
-            }
-        }
-    };
     virtual void predictUnconstraintMotion( btScalar timeStep ) BT_OVERRIDE
     {
         ProfileHelper prof( Profiler::kRecordPredictUnconstrainedMotion );
-        BT_PROFILE( "predictUnconstraintMotion" );
-        int grainSize = 50; // num of iterations per task for TBB
-        int bodyCount = m_nonStaticRigidBodies.size();
-        UpdaterUnconstrainedMotion update;
-        update.timeStep = timeStep;
-        update.rigidBodies = bodyCount ? &m_nonStaticRigidBodies[ 0 ] : NULL;
-        btPushThreadsAreRunning();
-        parallelFor( 0, bodyCount, grainSize, update );
-        btPopThreadsAreRunning();
+        ParentClass::predictUnconstraintMotion( timeStep );
     }
-#endif // #if USE_PARALLEL_PREDICT_UNCONSTRAINED_MOTION
-#if USE_PARALLEL_CREATE_PREDICTIVE_CONTACTS
-    struct UpdaterCreatePredictiveContacts
-    {
-        btScalar timeStep;
-        btRigidBody** rigidBodies;
-        MyDiscreteDynamicsWorld* world;
-        void forLoop( int iBegin, int iEnd ) const
-        {
-            world->createPredictiveContactsInternal( &rigidBodies[ iBegin ], iEnd - iBegin, timeStep );
-        }
-    };
-    virtual void createPredictiveContacts( btScalar timeStep )
+    virtual void createPredictiveContacts( btScalar timeStep ) BT_OVERRIDE
     {
         ProfileHelper prof( Profiler::kRecordCreatePredictiveContacts );
-        releasePredictiveContacts();
-        int grainSize = 50; // num of iterations per task for TBB or OPENMP
-        if ( int bodyCount = m_nonStaticRigidBodies.size() )
-        {
-            UpdaterCreatePredictiveContacts update;
-            update.world = this;
-            update.timeStep = timeStep;
-            update.rigidBodies = &m_nonStaticRigidBodies[ 0 ];
-            btPushThreadsAreRunning();
-            parallelFor( 0, bodyCount, grainSize, update );
-            btPopThreadsAreRunning();
-        }
+        ParentClass::createPredictiveContacts( timeStep );
     }
-#endif // #if USE_PARALLEL_CREATE_PREDICTIVE_CONTACTS
-#if USE_PARALLEL_INTEGRATE_TRANSFORMS
-    struct UpdaterIntegrateTransforms
-    {
-        btScalar timeStep;
-        btRigidBody** rigidBodies;
-        MyDiscreteDynamicsWorld* world;
-        void forLoop( int iBegin, int iEnd ) const
-        {
-            world->integrateTransformsInternal( &rigidBodies[ iBegin ], iEnd - iBegin, timeStep );
-        }
-    };
     virtual void integrateTransforms( btScalar timeStep ) BT_OVERRIDE
     {
         ProfileHelper prof( Profiler::kRecordIntegrateTransforms );
-        BT_PROFILE( "integrateTransforms" );
-        int grainSize = 50; // num of iterations per task for TBB or OPENMP
-        if ( int bodyCount = m_nonStaticRigidBodies.size() )
-        {
-            UpdaterIntegrateTransforms update;
-            update.world = this;
-            update.timeStep = timeStep;
-            update.rigidBodies = &m_nonStaticRigidBodies[ 0 ];
-            btPushThreadsAreRunning();
-            parallelFor( 0, bodyCount, grainSize, update );
-            btPopThreadsAreRunning();
-        }
+        ParentClass::integrateTransforms( timeStep );
     }
-#endif // #if USE_PARALLEL_INTEGRATE_TRANSFORMS
 public:
     BT_DECLARE_ALIGNED_ALLOCATOR();
     MyDiscreteDynamicsWorld( btDispatcher* dispatcher,
         btBroadphaseInterface* pairCache,
-        btConstraintSolver* constraintSolver,
+        btConstraintSolverPoolMt* constraintSolver,
         btCollisionConfiguration* collisionConfiguration
     ) :
         btDiscreteDynamicsWorldMt( dispatcher, pairCache, constraintSolver, collisionConfiguration )
     {
-#if USE_PARALLEL_ISLAND_SOLVER
         btSimulationIslandManagerMt* islandMgr = static_cast<btSimulationIslandManagerMt*>( m_islandManager );
-        islandMgr->setIslandDispatchFunction( parallelIslandDispatch );
+        islandMgr->setIslandDispatchFunction( myParallelIslandDispatch );
-#endif //#if USE_PARALLEL_ISLAND_SOLVER
     }
 };
@@ -625,8 +239,69 @@ btConstraintSolver* createSolverByType( SolverType t )
 }
+///
+/// btTaskSchedulerManager -- manage a number of task schedulers so we can switch between them
+///
+class btTaskSchedulerManager
+{
+    btAlignedObjectArray<btITaskScheduler*> m_taskSchedulers;
+    btAlignedObjectArray<btITaskScheduler*> m_allocatedTaskSchedulers;
+public:
+    btTaskSchedulerManager() {}
+    void init()
+    {
+        addTaskScheduler( btGetSequentialTaskScheduler() );
+#if BT_THREADSAFE
+        if ( btITaskScheduler* ts = createDefaultTaskScheduler() )
+        {
+            m_allocatedTaskSchedulers.push_back( ts );
+            addTaskScheduler( ts );
+        }
+        addTaskScheduler( btGetOpenMPTaskScheduler() );
+        addTaskScheduler( btGetTBBTaskScheduler() );
+        addTaskScheduler( btGetPPLTaskScheduler() );
+        if ( getNumTaskSchedulers() > 1 )
+        {
+            // prefer a non-sequential scheduler if available
+            btSetTaskScheduler( m_taskSchedulers[ 1 ] );
+        }
+        else
+        {
+            btSetTaskScheduler( m_taskSchedulers[ 0 ] );
+        }
+#endif // #if BT_THREADSAFE
+    }
+    void shutdown()
+    {
+        for ( int i = 0; i < m_allocatedTaskSchedulers.size(); ++i )
+        {
+            delete m_allocatedTaskSchedulers[ i ];
+        }
+        m_allocatedTaskSchedulers.clear();
+    }
+    void addTaskScheduler( btITaskScheduler* ts )
+    {
+        if ( ts )
+        {
+            m_taskSchedulers.push_back( ts );
+        }
+    }
+    int getNumTaskSchedulers() const { return m_taskSchedulers.size(); }
+    btITaskScheduler* getTaskScheduler( int i ) { return m_taskSchedulers[ i ]; }
+};
+static btTaskSchedulerManager gTaskSchedulerMgr;
+#if BT_THREADSAFE
+static bool gMultithreadedWorld = true;
+static bool gDisplayProfileInfo = true;
+#else
 static bool gMultithreadedWorld = false;
 static bool gDisplayProfileInfo = false;
+#endif
 static SolverType gSolverType = SOLVER_TYPE_SEQUENTIAL_IMPULSE;
 static int gSolverMode = SOLVER_SIMD |
     SOLVER_USE_WARMSTARTING |
@@ -652,15 +327,17 @@ CommonRigidBodyMTBase::CommonRigidBodyMTBase( struct GUIHelperInterface* helper
 {
     m_multithreadedWorld = false;
     m_multithreadCapable = false;
-    gTaskMgr.init();
+    if ( gTaskSchedulerMgr.getNumTaskSchedulers() == 0 )
+    {
+        gTaskSchedulerMgr.init();
+    }
 }
 CommonRigidBodyMTBase::~CommonRigidBodyMTBase()
 {
-    gTaskMgr.shutdown();
 }
-void boolPtrButtonCallback(int buttonId, bool buttonState, void* userPointer)
+static void boolPtrButtonCallback(int buttonId, bool buttonState, void* userPointer)
 {
     if (bool* val = static_cast<bool*>(userPointer))
     {

@@ -668,7 +345,7 @@ void boolPtrButtonCallback(int buttonId, bool buttonState, void* userPointer)
     }
 }
-void toggleSolverModeCallback(int buttonId, bool buttonState, void* userPointer)
+static void toggleSolverModeCallback(int buttonId, bool buttonState, void* userPointer)
 {
     if (buttonState)
     {

@@ -687,7 +364,7 @@ void toggleSolverModeCallback(int buttonId, bool buttonState, void* userPointer)
     }
 }
-void setSolverTypeCallback(int buttonId, bool buttonState, void* userPointer)
+static void setSolverTypeCallback(int buttonId, bool buttonState, void* userPointer)
 {
     if (buttonId >= 0 && buttonId < SOLVER_TYPE_COUNT)
     {

@@ -695,32 +372,34 @@ void setSolverTypeCallback(int buttonId, bool buttonState, void* userPointer)
     }
 }
-void apiSelectButtonCallback(int buttonId, bool buttonState, void* userPointer)
-{
-    gTaskMgr.setApi(static_cast<TaskManager::Api>(buttonId));
-    if (gTaskMgr.getApi()==TaskManager::apiNone)
-    {
-        gSliderNumThreads = 1.0f;
-    }
-    else
-    {
-        gSliderNumThreads = float(gTaskMgr.getNumThreads());
-    }
-}
-void setThreadCountCallback(float val, void* userPtr)
-{
-    if (gTaskMgr.getApi()==TaskManager::apiNone)
-    {
-        gSliderNumThreads = 1.0f;
-    }
-    else
-    {
-        gTaskMgr.setNumThreads( int( gSliderNumThreads ) );
-    }
-}
-void setSolverIterationCountCallback(float val, void* userPtr)
+static void setNumThreads( int numThreads )
+{
+#if BT_THREADSAFE
+    int newNumThreads = ( std::min )( numThreads, int( BT_MAX_THREAD_COUNT ) );
+    int oldNumThreads = btGetTaskScheduler()->getNumThreads();
+    // only call when the thread count is different
+    if ( newNumThreads != oldNumThreads )
+    {
+        btGetTaskScheduler()->setNumThreads( newNumThreads );
+    }
+#endif // #if BT_THREADSAFE
+}
+static void apiSelectButtonCallback(int buttonId, bool buttonState, void* userPointer)
+{
+#if BT_THREADSAFE
+    // change the task scheduler
+    btSetTaskScheduler( gTaskSchedulerMgr.getTaskScheduler( buttonId ) );
+    setNumThreads( int( gSliderNumThreads ) );
+#endif // #if BT_THREADSAFE
+}
+static void setThreadCountCallback(float val, void* userPtr)
+{
+    setNumThreads( int( gSliderNumThreads ) );
+}
+static void setSolverIterationCountCallback(float val, void* userPtr)
 {
     if (btDiscreteDynamicsWorld* world = reinterpret_cast<btDiscreteDynamicsWorld*>(userPtr))
     {
@@ -733,40 +412,37 @@ void CommonRigidBodyMTBase::createEmptyDynamicsWorld()
     gNumIslands = 0;
     m_solverType = gSolverType;
 #if BT_THREADSAFE && (BT_USE_OPENMP || BT_USE_PPL || BT_USE_TBB)
+    btAssert( btGetTaskScheduler() != NULL );
     m_multithreadCapable = true;
 #endif
     if ( gMultithreadedWorld )
     {
+#if BT_THREADSAFE
         m_dispatcher = NULL;
         btDefaultCollisionConstructionInfo cci;
         cci.m_defaultMaxPersistentManifoldPoolSize = 80000;
         cci.m_defaultMaxCollisionAlgorithmPoolSize = 80000;
         m_collisionConfiguration = new btDefaultCollisionConfiguration( cci );
-#if USE_PARALLEL_NARROWPHASE
-        m_dispatcher = new MyCollisionDispatcher( m_collisionConfiguration );
-#else
-        m_dispatcher = new btCollisionDispatcher( m_collisionConfiguration );
-#endif //USE_PARALLEL_NARROWPHASE
+        m_dispatcher = new MyCollisionDispatcher( m_collisionConfiguration, 40 );
         m_broadphase = new btDbvtBroadphase();
-#if BT_THREADSAFE && USE_PARALLEL_ISLAND_SOLVER
+        btConstraintSolverPoolMt* solverPool;
         {
             btConstraintSolver* solvers[ BT_MAX_THREAD_COUNT ];
-            int maxThreadCount = btMin( int(BT_MAX_THREAD_COUNT), TaskManager::getMaxNumThreads() );
+            int maxThreadCount = BT_MAX_THREAD_COUNT;
             for ( int i = 0; i < maxThreadCount; ++i )
             {
                 solvers[ i ] = createSolverByType( m_solverType );
             }
-            m_solver = new MyConstraintSolverPool( solvers, maxThreadCount );
+            solverPool = new btConstraintSolverPoolMt( solvers, maxThreadCount );
+            m_solver = solverPool;
         }
-#else
-        m_solver = createSolverByType( m_solverType );
-#endif //#if USE_PARALLEL_ISLAND_SOLVER
-        btDiscreteDynamicsWorld* world = new MyDiscreteDynamicsWorld( m_dispatcher, m_broadphase, m_solver, m_collisionConfiguration );
+        btDiscreteDynamicsWorld* world = new MyDiscreteDynamicsWorld( m_dispatcher, m_broadphase, solverPool, m_collisionConfiguration );
         m_dynamicsWorld = world;
         m_multithreadedWorld = true;
+        btAssert( btGetTaskScheduler() != NULL );
+#endif // #if BT_THREADSAFE
     }
     else
     {

@@ -885,29 +561,32 @@ void CommonRigidBodyMTBase::createDefaultParameters()
     }
     if (m_multithreadedWorld)
     {
+#if BT_THREADSAFE
         // create a button for each supported threading API
-        for (int iApi = 0; iApi < TaskManager::apiCount; ++iApi)
+        for ( int iApi = 0; iApi < gTaskSchedulerMgr.getNumTaskSchedulers(); ++iApi )
         {
-            TaskManager::Api api = static_cast<TaskManager::Api>(iApi);
-            if (gTaskMgr.isSupported(api))
-            {
-                char str[1024];
-                sprintf(str, "API %s", gTaskMgr.getApiName(api));
-                ButtonParams button( str, iApi, false );
-                button.m_callback = apiSelectButtonCallback;
-                m_guiHelper->getParameterInterface()->registerButtonParameter( button );
-            }
+            char str[ 1024 ];
+            sprintf( str, "API %s", gTaskSchedulerMgr.getTaskScheduler(iApi)->getName() );
+            ButtonParams button( str, iApi, false );
+            button.m_callback = apiSelectButtonCallback;
+            m_guiHelper->getParameterInterface()->registerButtonParameter( button );
         }
         {
             // create a slider to set the number of threads to use
-            gSliderNumThreads = float(gTaskMgr.getNumThreads());
+            int numThreads = btGetTaskScheduler()->getNumThreads();
+            // if slider has not been set yet (by another demo),
+            if ( gSliderNumThreads <= 1.0f )
+            {
+                gSliderNumThreads = float( numThreads );
+            }
             SliderParams slider("Thread count", &gSliderNumThreads);
             slider.m_minVal = 1.0f;
-            slider.m_maxVal = float(gTaskMgr.getMaxNumThreads()*2);
+            slider.m_maxVal = float( BT_MAX_THREAD_COUNT );
             slider.m_callback = setThreadCountCallback;
             slider.m_clampToIntegers = true;
             m_guiHelper->getParameterInterface()->registerSliderFloatParameter( slider );
         }
+#endif // #if BT_THREADSAFE
     }
 }

@@ -939,6 +618,7 @@ void CommonRigidBodyMTBase::drawScreenText()
 {
     if ( m_multithreadedWorld )
     {
+#if BT_THREADSAFE
         int numManifolds = m_dispatcher->getNumManifolds();
         int numContacts = 0;
         for ( int i = 0; i < numManifolds; ++i )

@@ -946,17 +626,18 @@
             const btPersistentManifold* man = m_dispatcher->getManifoldByIndexInternal( i );
             numContacts += man->getNumContacts();
         }
-        const char* mtApi = TaskManager::getApiName( gTaskMgr.getApi() );
+        const char* mtApi = btGetTaskScheduler()->getName();
         sprintf( msg, "islands=%d bodies=%d manifolds=%d contacts=%d [%s] threads=%d",
             gNumIslands,
             m_dynamicsWorld->getNumCollisionObjects(),
             numManifolds,
             numContacts,
             mtApi,
-            gTaskMgr.getApi() == TaskManager::apiNone ? 1 : gTaskMgr.getNumThreads()
+            btGetTaskScheduler()->getNumThreads()
         );
         m_guiHelper->getAppInterface()->drawText( msg, 100, yCoord, 0.4f );
         yCoord += yStep;
+#endif // #if BT_THREADSAFE
     }
     {
         int sm = gSolverMode;
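Taken together, the demo now only subclasses for profiling; the parallel narrowphase, island dispatch, and rigid-body loops all live in the core classes. A minimal construction sketch using just those core pieces, with no profiling hooks; local names are illustrative, and btConstraintSolverPoolMt is the pool type this diff passes to btDiscreteDynamicsWorldMt:

```cpp
#include "btBulletDynamicsCommon.h"
#include "BulletCollision/CollisionDispatch/btCollisionDispatcherMt.h"
#include "BulletDynamics/Dynamics/btDiscreteDynamicsWorldMt.h"
#include "LinearMath/btThreads.h"

btDiscreteDynamicsWorld* createMultithreadedWorld()
{
    btDefaultCollisionConfiguration* config = new btDefaultCollisionConfiguration();
    // 40 collision pairs per task, the grain size the demo passes above
    btCollisionDispatcher* dispatcher = new btCollisionDispatcherMt(config, 40);
    btBroadphaseInterface* broadphase = new btDbvtBroadphase();
    // one solver per potential thread, so solveGroup never waits for a free solver
    btConstraintSolver* solvers[BT_MAX_THREAD_COUNT];
    for (int i = 0; i < BT_MAX_THREAD_COUNT; ++i)
        solvers[i] = new btSequentialImpulseConstraintSolver();
    btConstraintSolverPoolMt* solverPool = new btConstraintSolverPoolMt(solvers, BT_MAX_THREAD_COUNT);
    return new btDiscreteDynamicsWorldMt(dispatcher, broadphase, solverPool, config);
}
```

A task scheduler still has to be installed with btSetTaskScheduler before stepping, exactly as gTaskSchedulerMgr.init() does above.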

View File

@@ -28,11 +28,10 @@ class btCollisionShape;
#include "btBulletCollisionCommon.h" #include "btBulletCollisionCommon.h"
#define BT_OVERRIDE
static btScalar gSliderStackRows = 8.0f; static btScalar gSliderStackRows = 8.0f;
static btScalar gSliderStackColumns = 6.0f; static btScalar gSliderStackColumns = 6.0f;
static btScalar gSliderStackHeight = 15.0f; static btScalar gSliderStackHeight = 10.0f;
static btScalar gSliderGroundHorizontalAmplitude = 0.0f; static btScalar gSliderGroundHorizontalAmplitude = 0.0f;
static btScalar gSliderGroundVerticalAmplitude = 0.0f; static btScalar gSliderGroundVerticalAmplitude = 0.0f;
@@ -240,7 +239,7 @@ void MultiThreadedDemo::createSceneObjects()
const btVector3 halfExtents = btVector3( 0.5f, 0.25f, 0.5f ); const btVector3 halfExtents = btVector3( 0.5f, 0.25f, 0.5f );
int numStackRows = btMax(1, int(gSliderStackRows)); int numStackRows = btMax(1, int(gSliderStackRows));
int numStackCols = btMax(1, int(gSliderStackColumns)); int numStackCols = btMax(1, int(gSliderStackColumns));
int stackHeight = 15; int stackHeight = int(gSliderStackHeight);
float stackZSpacing = 3.0f; float stackZSpacing = 3.0f;
float stackXSpacing = 20.0f; float stackXSpacing = 20.0f;

View File

@@ -1,336 +0,0 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include <stdio.h> //printf debugging
#include <algorithm>
// choose threading providers:
#if BT_USE_TBB
#define USE_TBB 1 // use Intel Threading Building Blocks for thread management
#endif
#if BT_USE_PPL
#define USE_PPL 1 // use Microsoft Parallel Patterns Library (installed with Visual Studio 2010 and later)
#endif // BT_USE_PPL
#if BT_USE_OPENMP
#define USE_OPENMP 1 // use OpenMP (also need to change compiler options for OpenMP support)
#endif
#if USE_OPENMP
#include <omp.h>
#endif // #if USE_OPENMP
#if USE_PPL
#include <ppl.h> // if you get a compile error here, check whether your version of Visual Studio includes PPL
// Visual Studio 2010 and later should come with it
#include <concrtrm.h> // for GetProcessorCount()
#endif // #if USE_PPL
#if USE_TBB
#define __TBB_NO_IMPLICIT_LINKAGE 1
#include <tbb/tbb.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#endif // #if USE_TBB
class TaskManager
{
public:
enum Api
{
apiNone,
apiOpenMP,
apiTbb,
apiPpl,
apiCount
};
static const char* getApiName( Api api )
{
switch ( api )
{
case apiNone: return "None";
case apiOpenMP: return "OpenMP";
case apiTbb: return "Intel TBB";
case apiPpl: return "MS PPL";
default: return "unknown";
}
}
TaskManager()
{
m_api = apiNone;
m_numThreads = 0;
#if USE_TBB
m_tbbSchedulerInit = NULL;
#endif // #if USE_TBB
}
Api getApi() const
{
return m_api;
}
bool isSupported( Api api ) const
{
#if USE_OPENMP
if ( api == apiOpenMP )
{
return true;
}
#endif
#if USE_TBB
if ( api == apiTbb )
{
return true;
}
#endif
#if USE_PPL
if ( api == apiPpl )
{
return true;
}
#endif
// apiNone is always "supported"
return api == apiNone;
}
void setApi( Api api )
{
if (isSupported(api))
{
m_api = api;
}
else
{
// no compile time support for selected API, fallback to "none"
m_api = apiNone;
}
}
static int getMaxNumThreads()
{
#if USE_OPENMP
return omp_get_max_threads();
#elif USE_PPL
return concurrency::GetProcessorCount();
#elif USE_TBB
return tbb::task_scheduler_init::default_num_threads();
#endif
return 1;
}
int getNumThreads() const
{
return m_numThreads;
}
int setNumThreads( int numThreads )
{
m_numThreads = ( std::max )( 1, numThreads );
#if USE_OPENMP
omp_set_num_threads( m_numThreads );
#endif
#if USE_PPL
{
using namespace concurrency;
if ( CurrentScheduler::Id() != -1 )
{
CurrentScheduler::Detach();
}
SchedulerPolicy policy;
policy.SetConcurrencyLimits( m_numThreads, m_numThreads );
CurrentScheduler::Create( policy );
}
#endif
#if USE_TBB
if ( m_tbbSchedulerInit )
{
delete m_tbbSchedulerInit;
m_tbbSchedulerInit = NULL;
}
m_tbbSchedulerInit = new tbb::task_scheduler_init( m_numThreads );
#endif
return m_numThreads;
}
void init()
{
if (m_numThreads == 0)
{
#if USE_PPL
setApi( apiPpl );
#endif
#if USE_TBB
setApi( apiTbb );
#endif
#if USE_OPENMP
setApi( apiOpenMP );
#endif
setNumThreads(getMaxNumThreads());
}
else
{
setNumThreads(m_numThreads);
}
}
void shutdown()
{
#if USE_TBB
if ( m_tbbSchedulerInit )
{
delete m_tbbSchedulerInit;
m_tbbSchedulerInit = NULL;
}
#endif
}
private:
Api m_api;
int m_numThreads;
#if USE_TBB
tbb::task_scheduler_init* m_tbbSchedulerInit;
#endif // #if USE_TBB
};
extern TaskManager gTaskMgr;
inline static void initTaskScheduler()
{
gTaskMgr.init();
}
inline static void cleanupTaskScheduler()
{
gTaskMgr.shutdown();
}
#if USE_TBB
///
/// TbbBodyAdapter -- Converts a body object that implements the
/// "forLoop(int iBegin, int iEnd) const" function
/// into a TBB compatible object that takes a tbb::blocked_range<int> type.
///
template <class TBody>
struct TbbBodyAdapter
{
const TBody* mBody;
void operator()( const tbb::blocked_range<int>& range ) const
{
mBody->forLoop( range.begin(), range.end() );
}
};
#endif // #if USE_TBB
#if USE_PPL
///
/// PplBodyAdapter -- Converts a body object that implements the
/// "forLoop(int iBegin, int iEnd) const" function
/// into a PPL compatible object that implements "void operator()( int ) const"
///
template <class TBody>
struct PplBodyAdapter
{
const TBody* mBody;
int mGrainSize;
int mIndexEnd;
void operator()( int i ) const
{
mBody->forLoop( i, (std::min)(i + mGrainSize, mIndexEnd) );
}
};
#endif // #if USE_PPL
///
/// parallelFor -- interface for submitting work expressed as a for loop to the worker threads
///
template <class TBody>
void parallelFor( int iBegin, int iEnd, int grainSize, const TBody& body )
{
#if USE_OPENMP
if ( gTaskMgr.getApi() == TaskManager::apiOpenMP )
{
#pragma omp parallel for schedule(static, 1)
for ( int i = iBegin; i < iEnd; i += grainSize )
{
body.forLoop( i, (std::min)( i + grainSize, iEnd ) );
}
return;
}
#endif // #if USE_OPENMP
#if USE_PPL
if ( gTaskMgr.getApi() == TaskManager::apiPpl )
{
// PPL dispatch
PplBodyAdapter<TBody> pplBody;
pplBody.mBody = &body;
pplBody.mGrainSize = grainSize;
pplBody.mIndexEnd = iEnd;
// note: MSVC 2010 doesn't support partitioner args, so avoid them
concurrency::parallel_for( iBegin,
iEnd,
grainSize,
pplBody
);
return;
}
#endif //#if USE_PPL
#if USE_TBB
if ( gTaskMgr.getApi() == TaskManager::apiTbb )
{
// TBB dispatch
TbbBodyAdapter<TBody> tbbBody;
tbbBody.mBody = &body;
tbb::parallel_for( tbb::blocked_range<int>( iBegin, iEnd, grainSize ),
tbbBody,
tbb::simple_partitioner()
);
return;
}
#endif // #if USE_TBB
{
// run on main thread
body.forLoop( iBegin, iEnd );
}
}

View File

@@ -279,9 +279,9 @@ void b3Win32ThreadSupport::startThreads(const Win32ThreadConstructionInfo& threa
     }
-    DWORD mask = 1;
-    mask = 1<<mask;
-    SetThreadAffinityMask(handle, mask);
+    //SetThreadAffinityMask(handle, 1 << 1); // this is what it was doing originally, a complete disaster for threading performance!
+    //SetThreadAffinityMask(handle, 1 << i); // I'm guessing this was the intention, but is still bad for performance due to one of the threads
+    //                                       // sometimes unable to execute because it wants to be on the same processor as the main thread (my guess)
     threadStatus.m_taskId = i;
     threadStatus.m_commandId = 0;

View File

@@ -0,0 +1,448 @@
#include "LinearMath/btTransform.h"
#include "../Utils/b3Clock.h"
#include "LinearMath/btAlignedObjectArray.h"
#include "LinearMath/btThreads.h"
#include "LinearMath/btQuickprof.h"
#include <stdio.h>
#include <algorithm>
typedef void( *btThreadFunc )( void* userPtr, void* lsMemory );
typedef void* ( *btThreadLocalStorageFunc )();
#if BT_THREADSAFE
#if defined( _WIN32 )
#include "b3Win32ThreadSupport.h"
b3ThreadSupportInterface* createThreadSupport( int numThreads, btThreadFunc threadFunc, btThreadLocalStorageFunc localStoreFunc, const char* uniqueName )
{
b3Win32ThreadSupport::Win32ThreadConstructionInfo constructionInfo( uniqueName, threadFunc, localStoreFunc, numThreads );
//constructionInfo.m_priority = 0; // highest priority (the default) -- can cause erratic performance when numThreads > numCores
// we don't want worker threads to be higher priority than the main thread or the main thread could get
// totally shut out and unable to tell the workers to stop
constructionInfo.m_priority = -1; // normal priority
b3Win32ThreadSupport* threadSupport = new b3Win32ThreadSupport( constructionInfo );
return threadSupport;
}
#else // #if defined( _WIN32 )
#include "b3PosixThreadSupport.h"
b3ThreadSupportInterface* createThreadSupport( int numThreads, btThreadFunc threadFunc, btThreadLocalStorageFunc localStoreFunc, const char* uniqueName)
{
b3PosixThreadSupport::ThreadConstructionInfo constructionInfo( uniqueName, threadFunc, localStoreFunc, numThreads );
b3ThreadSupportInterface* threadSupport = new b3PosixThreadSupport( constructionInfo );
return threadSupport;
}
#endif // #else // #if defined( _WIN32 )
///
/// getNumHardwareThreads()
///
///
/// https://stackoverflow.com/questions/150355/programmatically-find-the-number-of-cores-on-a-machine
///
#if __cplusplus >= 201103L
#include <thread>
int getNumHardwareThreads()
{
return std::thread::hardware_concurrency();
}
#elif defined( _WIN32 )
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
int getNumHardwareThreads()
{
// caps out at 32
SYSTEM_INFO info;
GetSystemInfo( &info );
return info.dwNumberOfProcessors;
}
#else
int getNumHardwareThreads()
{
return 0; // don't know
}
#endif
struct WorkerThreadStatus
{
enum Type
{
kInvalid,
kWaitingForWork,
kWorking,
kSleeping,
};
};
struct IJob
{
virtual void executeJob() = 0;
};
class ParallelForJob : public IJob
{
const btIParallelForBody* mBody;
int mBegin;
int mEnd;
public:
ParallelForJob()
{
mBody = NULL;
mBegin = 0;
mEnd = 0;
}
void init( int iBegin, int iEnd, const btIParallelForBody& body )
{
mBody = &body;
mBegin = iBegin;
mEnd = iEnd;
}
virtual void executeJob() override
{
BT_PROFILE( "executeJob" );
// call the functor body to do the work
mBody->forLoop( mBegin, mEnd );
}
};
struct JobContext
{
JobContext()
{
m_queueLock = NULL;
m_headIndex = 0;
m_tailIndex = 0;
m_workersShouldCheckQueue = false;
m_useSpinMutex = false;
}
b3CriticalSection* m_queueLock;
btSpinMutex m_mutex;
volatile bool m_workersShouldCheckQueue;
btAlignedObjectArray<IJob*> m_jobQueue;
bool m_queueIsEmpty;
int m_tailIndex;
int m_headIndex;
bool m_useSpinMutex;
void lockQueue()
{
if ( m_useSpinMutex )
{
m_mutex.lock();
}
else
{
m_queueLock->lock();
}
}
void unlockQueue()
{
if ( m_useSpinMutex )
{
m_mutex.unlock();
}
else
{
m_queueLock->unlock();
}
}
void clearQueue()
{
lockQueue();
m_headIndex = 0;
m_tailIndex = 0;
m_queueIsEmpty = true;
unlockQueue();
m_jobQueue.resizeNoInitialize( 0 );
}
void submitJob( IJob* job )
{
m_jobQueue.push_back( job );
lockQueue();
m_tailIndex++;
m_queueIsEmpty = false;
unlockQueue();
}
IJob* consumeJob()
{
if ( m_queueIsEmpty )
{
// lock free path. even if this is taken erroneously it isn't harmful
return NULL;
}
IJob* job = NULL;
lockQueue();
if ( !m_queueIsEmpty )
{
job = m_jobQueue[ m_headIndex++ ];
if ( m_headIndex == m_tailIndex )
{
m_queueIsEmpty = true;
}
}
unlockQueue();
return job;
}
};
struct WorkerThreadLocalStorage
{
int threadId;
WorkerThreadStatus::Type status;
};
static void WorkerThreadFunc( void* userPtr, void* lsMemory )
{
BT_PROFILE( "WorkerThreadFunc" );
WorkerThreadLocalStorage* localStorage = (WorkerThreadLocalStorage*) lsMemory;
localStorage->status = WorkerThreadStatus::kWaitingForWork;
//printf( "WorkerThreadFunc: worker %d start working\n", localStorage->threadId );
JobContext* jobContext = (JobContext*) userPtr;
while ( jobContext->m_workersShouldCheckQueue )
{
if ( IJob* job = jobContext->consumeJob() )
{
localStorage->status = WorkerThreadStatus::kWorking;
job->executeJob();
localStorage->status = WorkerThreadStatus::kWaitingForWork;
}
else
{
// todo: spin wait a bit to avoid hammering the empty queue
}
}
//printf( "WorkerThreadFunc stop working\n" );
localStorage->status = WorkerThreadStatus::kSleeping;
// go idle
}
static void* WorkerThreadAllocFunc()
{
return new WorkerThreadLocalStorage;
}
class btTaskSchedulerDefault : public btITaskScheduler
{
JobContext m_jobContext;
b3ThreadSupportInterface* m_threadSupport;
btAlignedObjectArray<ParallelForJob> m_jobs;
btSpinMutex m_antiNestingLock; // prevent nested parallel-for
int m_numThreads;
int m_numWorkerThreads;
int m_numWorkersRunning;
public:
btTaskSchedulerDefault() : btITaskScheduler("ThreadSupport")
{
m_threadSupport = NULL;
m_numThreads = getNumHardwareThreads();
// if can't detect number of cores,
if ( m_numThreads == 0 )
{
// take a guess
m_numThreads = 4;
}
m_numWorkerThreads = m_numThreads - 1;
m_numWorkersRunning = 0;
}
virtual ~btTaskSchedulerDefault()
{
shutdown();
}
void init()
{
int maxNumWorkerThreads = BT_MAX_THREAD_COUNT - 1;
m_threadSupport = createThreadSupport( maxNumWorkerThreads, WorkerThreadFunc, WorkerThreadAllocFunc, "TaskScheduler" );
m_jobContext.m_queueLock = m_threadSupport->createCriticalSection();
for ( int i = 0; i < maxNumWorkerThreads; i++ )
{
WorkerThreadLocalStorage* storage = (WorkerThreadLocalStorage*) m_threadSupport->getThreadLocalMemory( i );
btAssert( storage );
storage->threadId = i;
storage->status = WorkerThreadStatus::kSleeping;
}
setWorkersActive( false ); // no work for them yet
}
virtual void shutdown()
{
setWorkersActive( false );
waitForWorkersToSleep();
m_threadSupport->deleteCriticalSection( m_jobContext.m_queueLock );
m_jobContext.m_queueLock = NULL;
delete m_threadSupport;
m_threadSupport = NULL;
}
void setWorkersActive( bool active )
{
m_jobContext.m_workersShouldCheckQueue = active;
}
virtual int getMaxNumThreads() const BT_OVERRIDE
{
return BT_MAX_THREAD_COUNT;
}
virtual int getNumThreads() const BT_OVERRIDE
{
return m_numThreads;
}
virtual void setNumThreads( int numThreads ) BT_OVERRIDE
{
m_numThreads = btMax( btMin(numThreads, int(BT_MAX_THREAD_COUNT)), 1 );
m_numWorkerThreads = m_numThreads - 1;
}
void waitJobs()
{
BT_PROFILE( "waitJobs" );
// have the main thread work until the job queue is empty
for ( ;; )
{
if ( IJob* job = m_jobContext.consumeJob() )
{
job->executeJob();
}
else
{
break;
}
}
// done with jobs for now, tell workers to rest
setWorkersActive( false );
waitForWorkersToSleep();
}
void wakeWorkers()
{
BT_PROFILE( "wakeWorkers" );
btAssert( m_jobContext.m_workersShouldCheckQueue );
// tell each worker thread to start working
for ( int i = 0; i < m_numWorkerThreads; i++ )
{
m_threadSupport->runTask( B3_THREAD_SCHEDULE_TASK, &m_jobContext, i );
m_numWorkersRunning++;
}
}
void waitForWorkersToSleep()
{
BT_PROFILE( "waitForWorkersToSleep" );
while ( m_numWorkersRunning > 0 )
{
int iThread;
int threadStatus;
m_threadSupport->waitForResponse( &iThread, &threadStatus ); // wait for worker threads to finish working
m_numWorkersRunning--;
}
//m_threadSupport->waitForAllTasksToComplete();
for ( int i = 0; i < m_numWorkerThreads; i++ )
{
//m_threadSupport->waitForTaskCompleted( i );
WorkerThreadLocalStorage* storage = (WorkerThreadLocalStorage*) m_threadSupport->getThreadLocalMemory( i );
btAssert( storage );
btAssert( storage->status == WorkerThreadStatus::kSleeping );
}
}
virtual void parallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body ) BT_OVERRIDE
{
BT_PROFILE( "parallelFor_ThreadSupport" );
btAssert( iEnd >= iBegin );
btAssert( grainSize >= 1 );
int iterationCount = iEnd - iBegin;
if ( iterationCount > grainSize && m_numWorkerThreads > 0 && m_antiNestingLock.tryLock() )
{
int jobCount = ( iterationCount + grainSize - 1 ) / grainSize;
btAssert( jobCount >= 2 ); // need more than one job for multithreading
if ( jobCount > m_jobs.size() )
{
m_jobs.resize( jobCount );
}
if ( jobCount > m_jobContext.m_jobQueue.capacity() )
{
m_jobContext.m_jobQueue.reserve( jobCount );
}
m_jobContext.clearQueue();
// prepare worker threads for incoming work
setWorkersActive( true );
wakeWorkers();
// submit all of the jobs
int iJob = 0;
for ( int i = iBegin; i < iEnd; i += grainSize )
{
btAssert( iJob < jobCount );
int iE = btMin( i + grainSize, iEnd );
ParallelForJob& job = m_jobs[ iJob ];
job.init( i, iE, body );
m_jobContext.submitJob( &job );
iJob++;
}
// put the main thread to work on emptying the job queue and then wait for all workers to finish
waitJobs();
m_antiNestingLock.unlock();
}
else
{
BT_PROFILE( "parallelFor_mainThread" );
// just run on main thread
body.forLoop( iBegin, iEnd );
}
}
};
btITaskScheduler* createDefaultTaskScheduler()
{
btTaskSchedulerDefault* ts = new btTaskSchedulerDefault();
ts->init();
return ts;
}
#else // #if BT_THREADSAFE
btITaskScheduler* createDefaultTaskScheduler()
{
return NULL;
}
#endif // #else // #if BT_THREADSAFE
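createDefaultTaskScheduler() is the entry point callers are expected to use; together with btSetTaskScheduler it replaces the old gTaskMgr.init()/shutdown() pair. A usage sketch relying only on the functions shown above; the thread count is an arbitrary example, and the caller owns (and must eventually delete) the returned scheduler, as gTaskSchedulerMgr does in the demo:

```cpp
#include "btTaskScheduler.h"
#include "LinearMath/btThreads.h"

void setupThreading()
{
    btITaskScheduler* scheduler = createDefaultTaskScheduler(); // NULL when built without BT_THREADSAFE
    if (scheduler)
    {
        scheduler->setNumThreads(4);   // clamped internally to [1, BT_MAX_THREAD_COUNT]
        btSetTaskScheduler(scheduler); // btParallelFor now fans out to the worker pool
    }
    else
    {
        btSetTaskScheduler(btGetSequentialTaskScheduler());
    }
}
```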

View File

@@ -0,0 +1,26 @@
/*
Copyright (c) 2003-2014 Erwin Coumans http://bullet.googlecode.com
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef BT_TASK_SCHEDULER_H
#define BT_TASK_SCHEDULER_H
class btITaskScheduler;
btITaskScheduler* createDefaultTaskScheduler();
#endif // BT_TASK_SCHEDULER_H

View File

@@ -184,7 +184,7 @@ void MyEnterProfileZoneFunc(const char* msg)
return;
#ifndef BT_NO_PROFILE
int threadId = btQuickprofGetCurrentThreadIndex2();
-if (threadId<0)
+if (threadId<0 || threadId >= BT_QUICKPROF_MAX_THREAD_COUNT)
return;
if (gStackDepths[threadId] >= MAX_NESTING)
@@ -208,8 +208,8 @@ void MyLeaveProfileZoneFunc()
return;
#ifndef BT_NO_PROFILE
int threadId = btQuickprofGetCurrentThreadIndex2();
-if (threadId<0)
+if (threadId<0 || threadId >= BT_QUICKPROF_MAX_THREAD_COUNT)
return;
if (gStackDepths[threadId] <= 0)
{

View File

@@ -15,6 +15,7 @@ SET(BulletCollision_SRCS
CollisionDispatch/btBox2dBox2dCollisionAlgorithm.cpp
CollisionDispatch/btBoxBoxDetector.cpp
CollisionDispatch/btCollisionDispatcher.cpp
+CollisionDispatch/btCollisionDispatcherMt.cpp
CollisionDispatch/btCollisionObject.cpp
CollisionDispatch/btCollisionWorld.cpp
CollisionDispatch/btCollisionWorldImporter.cpp
@@ -123,6 +124,7 @@ SET(CollisionDispatch_HDRS
CollisionDispatch/btCollisionConfiguration.h
CollisionDispatch/btCollisionCreateFunc.h
CollisionDispatch/btCollisionDispatcher.h
+CollisionDispatch/btCollisionDispatcherMt.h
CollisionDispatch/btCollisionObject.h
CollisionDispatch/btCollisionObjectWrapper.h
CollisionDispatch/btCollisionWorld.h

View File

@@ -0,0 +1,164 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "btCollisionDispatcherMt.h"
#include "LinearMath/btQuickprof.h"
#include "BulletCollision/BroadphaseCollision/btCollisionAlgorithm.h"
#include "BulletCollision/CollisionShapes/btCollisionShape.h"
#include "BulletCollision/CollisionDispatch/btCollisionObject.h"
#include "BulletCollision/BroadphaseCollision/btOverlappingPairCache.h"
#include "LinearMath/btPoolAllocator.h"
#include "BulletCollision/CollisionDispatch/btCollisionConfiguration.h"
#include "BulletCollision/CollisionDispatch/btCollisionObjectWrapper.h"
btCollisionDispatcherMt::btCollisionDispatcherMt( btCollisionConfiguration* config, int grainSize )
: btCollisionDispatcher( config )
{
m_batchUpdating = false;
m_grainSize = grainSize; // iterations per task
}
btPersistentManifold* btCollisionDispatcherMt::getNewManifold( const btCollisionObject* body0, const btCollisionObject* body1 )
{
//optional relative contact breaking threshold, turned on by default (use setDispatcherFlags to switch off feature for improved performance)
btScalar contactBreakingThreshold = ( m_dispatcherFlags & btCollisionDispatcher::CD_USE_RELATIVE_CONTACT_BREAKING_THRESHOLD ) ?
btMin( body0->getCollisionShape()->getContactBreakingThreshold( gContactBreakingThreshold ), body1->getCollisionShape()->getContactBreakingThreshold( gContactBreakingThreshold ) )
: gContactBreakingThreshold;
btScalar contactProcessingThreshold = btMin( body0->getContactProcessingThreshold(), body1->getContactProcessingThreshold() );
void* mem = m_persistentManifoldPoolAllocator->allocate( sizeof( btPersistentManifold ) );
if ( NULL == mem )
{
//we got a pool memory overflow, by default we fallback to dynamically allocate memory. If we require a contiguous contact pool then assert.
if ( ( m_dispatcherFlags&CD_DISABLE_CONTACTPOOL_DYNAMIC_ALLOCATION ) == 0 )
{
mem = btAlignedAlloc( sizeof( btPersistentManifold ), 16 );
}
else
{
btAssert( 0 );
//make sure to increase the m_defaultMaxPersistentManifoldPoolSize in the btDefaultCollisionConstructionInfo/btDefaultCollisionConfiguration
return 0;
}
}
btPersistentManifold* manifold = new( mem ) btPersistentManifold( body0, body1, 0, contactBreakingThreshold, contactProcessingThreshold );
if ( !m_batchUpdating )
{
// batch updater will update manifold pointers array after finishing, so
// only need to update array when not batch-updating
btAssert( !btThreadsAreRunning() );
manifold->m_index1a = m_manifoldsPtr.size();
m_manifoldsPtr.push_back( manifold );
}
return manifold;
}
void btCollisionDispatcherMt::releaseManifold( btPersistentManifold* manifold )
{
clearManifold( manifold );
btAssert( !btThreadsAreRunning() );
if ( !m_batchUpdating )
{
// batch updater will update manifold pointers array after finishing, so
// only need to update array when not batch-updating
int findIndex = manifold->m_index1a;
btAssert( findIndex < m_manifoldsPtr.size() );
m_manifoldsPtr.swap( findIndex, m_manifoldsPtr.size() - 1 );
m_manifoldsPtr[ findIndex ]->m_index1a = findIndex;
m_manifoldsPtr.pop_back();
}
manifold->~btPersistentManifold();
if ( m_persistentManifoldPoolAllocator->validPtr( manifold ) )
{
m_persistentManifoldPoolAllocator->freeMemory( manifold );
}
else
{
btAlignedFree( manifold );
}
}
struct CollisionDispatcherUpdater : public btIParallelForBody
{
btBroadphasePair* mPairArray;
btNearCallback mCallback;
btCollisionDispatcher* mDispatcher;
const btDispatcherInfo* mInfo;
CollisionDispatcherUpdater()
{
mPairArray = NULL;
mCallback = NULL;
mDispatcher = NULL;
mInfo = NULL;
}
void forLoop( int iBegin, int iEnd ) const
{
for ( int i = iBegin; i < iEnd; ++i )
{
btBroadphasePair* pair = &mPairArray[ i ];
mCallback( *pair, *mDispatcher, *mInfo );
}
}
};
void btCollisionDispatcherMt::dispatchAllCollisionPairs( btOverlappingPairCache* pairCache, const btDispatcherInfo& info, btDispatcher* dispatcher )
{
int pairCount = pairCache->getNumOverlappingPairs();
if ( pairCount == 0 )
{
return;
}
CollisionDispatcherUpdater updater;
updater.mCallback = getNearCallback();
updater.mPairArray = pairCache->getOverlappingPairArrayPtr();
updater.mDispatcher = this;
updater.mInfo = &info;
m_batchUpdating = true;
btParallelFor( 0, pairCount, m_grainSize, updater );
m_batchUpdating = false;
// reconstruct the manifolds array to ensure determinism
m_manifoldsPtr.resizeNoInitialize( 0 );
btBroadphasePair* pairs = pairCache->getOverlappingPairArrayPtr();
for ( int i = 0; i < pairCount; ++i )
{
if (btCollisionAlgorithm* algo = pairs[ i ].m_algorithm)
{
algo->getAllContactManifolds( m_manifoldsPtr );
}
}
// update the indices (used when releasing manifolds)
for ( int i = 0; i < m_manifoldsPtr.size(); ++i )
{
m_manifoldsPtr[ i ]->m_index1a = i;
}
}
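A sketch of dropping the new dispatcher into a typical setup (assumed usage, not part of this commit; the 40 is just the constructor's default grain size made explicit):

#include "BulletCollision/CollisionDispatch/btCollisionDispatcherMt.h"

btDispatcher* createMtDispatcher( btCollisionConfiguration* config )
{
    // 40 narrowphase pair updates per parallel-for task
    return new btCollisionDispatcherMt( config, 40 );
}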

View File

@@ -0,0 +1,39 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef BT_COLLISION_DISPATCHER_MT_H
#define BT_COLLISION_DISPATCHER_MT_H
#include "BulletCollision/CollisionDispatch/btCollisionDispatcher.h"
#include "LinearMath/btThreads.h"
class btCollisionDispatcherMt : public btCollisionDispatcher
{
public:
btCollisionDispatcherMt( btCollisionConfiguration* config, int grainSize = 40 );
virtual btPersistentManifold* getNewManifold( const btCollisionObject* body0, const btCollisionObject* body1 ) BT_OVERRIDE;
virtual void releaseManifold( btPersistentManifold* manifold ) BT_OVERRIDE;
virtual void dispatchAllCollisionPairs( btOverlappingPairCache* pairCache, const btDispatcherInfo& info, btDispatcher* dispatcher ) BT_OVERRIDE;
protected:
bool m_batchUpdating;
int m_grainSize;
};
#endif //BT_COLLISION_DISPATCHER_MT_H

View File

@@ -108,8 +108,108 @@ struct InplaceSolverIslandCallbackMt : public btSimulationIslandManagerMt::Islan
};
-btDiscreteDynamicsWorldMt::btDiscreteDynamicsWorldMt(btDispatcher* dispatcher,btBroadphaseInterface* pairCache,btConstraintSolver* constraintSolver, btCollisionConfiguration* collisionConfiguration)
+///
+/// btConstraintSolverPoolMt
+///
+btConstraintSolverPoolMt::ThreadSolver* btConstraintSolverPoolMt::getAndLockThreadSolver()
{
int i = 0;
#if BT_THREADSAFE
i = btGetCurrentThreadIndex() % m_solvers.size();
#endif // #if BT_THREADSAFE
while ( true )
{
ThreadSolver& solver = m_solvers[ i ];
if ( solver.mutex.tryLock() )
{
return &solver;
}
// failed, try the next one
i = ( i + 1 ) % m_solvers.size();
}
return NULL;
}
void btConstraintSolverPoolMt::init( btConstraintSolver** solvers, int numSolvers )
{
m_solverType = BT_SEQUENTIAL_IMPULSE_SOLVER;
m_solvers.resize( numSolvers );
for ( int i = 0; i < numSolvers; ++i )
{
m_solvers[ i ].solver = solvers[ i ];
}
if ( numSolvers > 0 )
{
m_solverType = solvers[ 0 ]->getSolverType();
}
}
// create the solvers for me
btConstraintSolverPoolMt::btConstraintSolverPoolMt( int numSolvers )
{
btAlignedObjectArray<btConstraintSolver*> solvers;
solvers.reserve( numSolvers );
for ( int i = 0; i < numSolvers; ++i )
{
btConstraintSolver* solver = new btSequentialImpulseConstraintSolver();
solvers.push_back( solver );
}
init( &solvers[ 0 ], numSolvers );
}
// pass in fully constructed solvers (destructor will delete them)
btConstraintSolverPoolMt::btConstraintSolverPoolMt( btConstraintSolver** solvers, int numSolvers )
{
init( solvers, numSolvers );
}
btConstraintSolverPoolMt::~btConstraintSolverPoolMt()
{
// delete all solvers
for ( int i = 0; i < m_solvers.size(); ++i )
{
ThreadSolver& solver = m_solvers[ i ];
delete solver.solver;
solver.solver = NULL;
}
}
///solve a group of constraints
btScalar btConstraintSolverPoolMt::solveGroup( btCollisionObject** bodies,
int numBodies,
btPersistentManifold** manifolds,
int numManifolds,
btTypedConstraint** constraints,
int numConstraints,
const btContactSolverInfo& info,
btIDebugDraw* debugDrawer,
btDispatcher* dispatcher
)
{
ThreadSolver* ts = getAndLockThreadSolver();
ts->solver->solveGroup( bodies, numBodies, manifolds, numManifolds, constraints, numConstraints, info, debugDrawer, dispatcher );
ts->mutex.unlock();
return 0.0f;
}
void btConstraintSolverPoolMt::reset()
{
for ( int i = 0; i < m_solvers.size(); ++i )
{
ThreadSolver& solver = m_solvers[ i ];
solver.mutex.lock();
solver.solver->reset();
solver.mutex.unlock();
}
}
///
/// btDiscreteDynamicsWorldMt
///
btDiscreteDynamicsWorldMt::btDiscreteDynamicsWorldMt(btDispatcher* dispatcher, btBroadphaseInterface* pairCache, btConstraintSolverPoolMt* constraintSolver, btCollisionConfiguration* collisionConfiguration)
: btDiscreteDynamicsWorld(dispatcher,pairCache,constraintSolver,collisionConfiguration)
{
if (m_ownsIslandManager)
@@ -124,8 +224,8 @@ btDiscreteDynamicsWorldMt::btDiscreteDynamicsWorldMt(btDispatcher* dispatcher,bt
{
void* mem = btAlignedAlloc(sizeof(btSimulationIslandManagerMt),16);
btSimulationIslandManagerMt* im = new (mem) btSimulationIslandManagerMt();
-m_islandManager = im;
im->setMinimumSolverBatchSize( m_solverInfo.m_minimumSolverBatchSize );
+m_islandManager = im;
}
}
@@ -145,7 +245,7 @@ btDiscreteDynamicsWorldMt::~btDiscreteDynamicsWorldMt()
}
void btDiscreteDynamicsWorldMt::solveConstraints(btContactSolverInfo& solverInfo)
{
BT_PROFILE("solveConstraints");
@@ -160,3 +260,68 @@ void btDiscreteDynamicsWorldMt::solveConstraints(btContactSolverInfo& solverInfo
}
struct UpdaterUnconstrainedMotion : public btIParallelForBody
{
btScalar timeStep;
btRigidBody** rigidBodies;
void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
{
for ( int i = iBegin; i < iEnd; ++i )
{
btRigidBody* body = rigidBodies[ i ];
if ( !body->isStaticOrKinematicObject() )
{
//don't integrate/update velocities here, it happens in the constraint solver
body->applyDamping( timeStep );
body->predictIntegratedTransform( timeStep, body->getInterpolationWorldTransform() );
}
}
}
};
void btDiscreteDynamicsWorldMt::predictUnconstraintMotion( btScalar timeStep )
{
BT_PROFILE( "predictUnconstraintMotion" );
if ( m_nonStaticRigidBodies.size() > 0 )
{
UpdaterUnconstrainedMotion update;
update.timeStep = timeStep;
update.rigidBodies = &m_nonStaticRigidBodies[ 0 ];
int grainSize = 50; // num of iterations per task for task scheduler
btParallelFor( 0, m_nonStaticRigidBodies.size(), grainSize, update );
}
}
void btDiscreteDynamicsWorldMt::createPredictiveContacts( btScalar timeStep )
{
BT_PROFILE( "createPredictiveContacts" );
releasePredictiveContacts();
if ( m_nonStaticRigidBodies.size() > 0 )
{
UpdaterCreatePredictiveContacts update;
update.world = this;
update.timeStep = timeStep;
update.rigidBodies = &m_nonStaticRigidBodies[ 0 ];
int grainSize = 50; // num of iterations per task for task scheduler
btParallelFor( 0, m_nonStaticRigidBodies.size(), grainSize, update );
}
}
void btDiscreteDynamicsWorldMt::integrateTransforms( btScalar timeStep )
{
BT_PROFILE( "integrateTransforms" );
if ( m_nonStaticRigidBodies.size() > 0 )
{
UpdaterIntegrateTransforms update;
update.world = this;
update.timeStep = timeStep;
update.rigidBodies = &m_nonStaticRigidBodies[ 0 ];
int grainSize = 50; // num of iterations per task for task scheduler
btParallelFor( 0, m_nonStaticRigidBodies.size(), grainSize, update );
}
}
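Putting the pieces together, a sketch of constructing the multithreaded world end-to-end (assumed usage; sizing the solver pool from the active task scheduler is our choice, guided by the header comment that one solver per thread avoids spin-waiting):

#include "BulletDynamics/Dynamics/btDiscreteDynamicsWorldMt.h"
#include "LinearMath/btThreads.h"

btDiscreteDynamicsWorldMt* createWorldMt( btDispatcher* dispatcher,
                                          btBroadphaseInterface* broadphase,
                                          btCollisionConfiguration* config )
{
    // one solver per scheduler thread so solveGroup should never spin-wait
    int numSolvers = btGetTaskScheduler()->getNumThreads();
    btConstraintSolverPoolMt* solverPool = new btConstraintSolverPoolMt( numSolvers );
    return new btDiscreteDynamicsWorldMt( dispatcher, broadphase, solverPool, config );
}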

View File

@@ -18,24 +18,116 @@ subject to the following restrictions:
#define BT_DISCRETE_DYNAMICS_WORLD_MT_H
#include "btDiscreteDynamicsWorld.h"
+#include "btSimulationIslandManagerMt.h"
+#include "BulletDynamics/ConstraintSolver/btConstraintSolver.h"
struct InplaceSolverIslandCallbackMt;
///
/// btConstraintSolverPoolMt - masquerades as a constraint solver, but really it is a threadsafe pool of them.
///
/// Each solver in the pool is protected by a mutex. When solveGroup is called from a thread,
/// the pool looks for a solver that isn't being used by another thread, locks it, and dispatches the
/// call to the solver.
/// So long as there are at least as many solvers as there are hardware threads, it should never need to
/// spin wait.
///
class btConstraintSolverPoolMt : public btConstraintSolver
{
public:
// create the solvers for me
explicit btConstraintSolverPoolMt( int numSolvers );
// pass in fully constructed solvers (destructor will delete them)
btConstraintSolverPoolMt( btConstraintSolver** solvers, int numSolvers );
virtual ~btConstraintSolverPoolMt();
///solve a group of constraints
virtual btScalar solveGroup( btCollisionObject** bodies,
int numBodies,
btPersistentManifold** manifolds,
int numManifolds,
btTypedConstraint** constraints,
int numConstraints,
const btContactSolverInfo& info,
btIDebugDraw* debugDrawer,
btDispatcher* dispatcher
) BT_OVERRIDE;
virtual void reset() BT_OVERRIDE;
virtual btConstraintSolverType getSolverType() const BT_OVERRIDE { return m_solverType; }
private:
const static size_t kCacheLineSize = 128;
struct ThreadSolver
{
btConstraintSolver* solver;
btSpinMutex mutex;
char _cachelinePadding[ kCacheLineSize - sizeof( btSpinMutex ) - sizeof( void* ) ]; // keep mutexes from sharing a cache line
};
btAlignedObjectArray<ThreadSolver> m_solvers;
btConstraintSolverType m_solverType;
ThreadSolver* getAndLockThreadSolver();
void init( btConstraintSolver** solvers, int numSolvers );
};
///
/// btDiscreteDynamicsWorldMt -- a version of DiscreteDynamicsWorld with some minor changes to support
/// solving simulation islands on multiple threads.
///
+/// Should function exactly like btDiscreteDynamicsWorld.
+/// Also 3 methods that iterate over all of the rigidbodies can run in parallel:
+/// - predictUnconstraintMotion
+/// - integrateTransforms
+/// - createPredictiveContacts
+///
ATTRIBUTE_ALIGNED16(class) btDiscreteDynamicsWorldMt : public btDiscreteDynamicsWorld
{
protected:
InplaceSolverIslandCallbackMt* m_solverIslandCallbackMt;
-virtual void solveConstraints(btContactSolverInfo& solverInfo);
+virtual void solveConstraints(btContactSolverInfo& solverInfo) BT_OVERRIDE;
virtual void predictUnconstraintMotion( btScalar timeStep ) BT_OVERRIDE;
struct UpdaterCreatePredictiveContacts : public btIParallelForBody
{
btScalar timeStep;
btRigidBody** rigidBodies;
btDiscreteDynamicsWorldMt* world;
void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
{
world->createPredictiveContactsInternal( &rigidBodies[ iBegin ], iEnd - iBegin, timeStep );
}
};
virtual void createPredictiveContacts( btScalar timeStep ) BT_OVERRIDE;
struct UpdaterIntegrateTransforms : public btIParallelForBody
{
btScalar timeStep;
btRigidBody** rigidBodies;
btDiscreteDynamicsWorldMt* world;
void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
{
world->integrateTransformsInternal( &rigidBodies[ iBegin ], iEnd - iBegin, timeStep );
}
};
virtual void integrateTransforms( btScalar timeStep ) BT_OVERRIDE;
public:
BT_DECLARE_ALIGNED_ALLOCATOR();
-btDiscreteDynamicsWorldMt(btDispatcher* dispatcher,btBroadphaseInterface* pairCache,btConstraintSolver* constraintSolver,btCollisionConfiguration* collisionConfiguration);
+btDiscreteDynamicsWorldMt(btDispatcher* dispatcher,
+btBroadphaseInterface* pairCache,
+btConstraintSolverPoolMt* constraintSolver, // Note this should be a solver-pool for multi-threading
+btCollisionConfiguration* collisionConfiguration
+);
virtual ~btDiscreteDynamicsWorldMt();
};
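The pool's second constructor takes ownership of pre-built solvers, mirroring the implementation shown earlier; a sketch, assuming the temporary array can be discarded once the pool is constructed:

#include "BulletDynamics/ConstraintSolver/btSequentialImpulseConstraintSolver.h"

btConstraintSolverPoolMt* makeSolverPool( int numSolvers )
{
    btAlignedObjectArray<btConstraintSolver*> solvers;
    for ( int i = 0; i < numSolvers; ++i )
    {
        solvers.push_back( new btSequentialImpulseConstraintSolver() );
    }
    // the pool's destructor deletes these solvers
    return new btConstraintSolverPoolMt( &solvers[ 0 ], solvers.size() );
}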

View File

@@ -15,6 +15,7 @@ subject to the following restrictions:
#include "LinearMath/btScalar.h" #include "LinearMath/btScalar.h"
#include "LinearMath/btThreads.h"
#include "btSimulationIslandManagerMt.h" #include "btSimulationIslandManagerMt.h"
#include "BulletCollision/BroadphaseCollision/btDispatcher.h" #include "BulletCollision/BroadphaseCollision/btDispatcher.h"
#include "BulletCollision/NarrowPhaseCollision/btPersistentManifold.h" #include "BulletCollision/NarrowPhaseCollision/btPersistentManifold.h"
@@ -44,7 +45,7 @@ btSimulationIslandManagerMt::btSimulationIslandManagerMt()
{
m_minimumSolverBatchSize = calcBatchCost(0, 128, 0);
m_batchIslandMinBodyCount = 32;
-m_islandDispatch = defaultIslandDispatch;
+m_islandDispatch = parallelIslandDispatch;
m_batchIsland = NULL;
}
@@ -545,8 +546,9 @@ void btSimulationIslandManagerMt::mergeIslands()
}
-void btSimulationIslandManagerMt::defaultIslandDispatch( btAlignedObjectArray<Island*>* islandsPtr, IslandCallback* callback )
+void btSimulationIslandManagerMt::serialIslandDispatch( btAlignedObjectArray<Island*>* islandsPtr, IslandCallback* callback )
{
+BT_PROFILE( "serialIslandDispatch" );
// serial dispatch
btAlignedObjectArray<Island*>& islands = *islandsPtr;
for ( int i = 0; i < islands.size(); ++i )
@@ -565,6 +567,41 @@ void btSimulationIslandManagerMt::defaultIslandDispatch( btAlignedObjectArray<Is
}
}
struct UpdateIslandDispatcher : public btIParallelForBody
{
btAlignedObjectArray<btSimulationIslandManagerMt::Island*>* islandsPtr;
btSimulationIslandManagerMt::IslandCallback* callback;
void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
{
for ( int i = iBegin; i < iEnd; ++i )
{
btSimulationIslandManagerMt::Island* island = ( *islandsPtr )[ i ];
btPersistentManifold** manifolds = island->manifoldArray.size() ? &island->manifoldArray[ 0 ] : NULL;
btTypedConstraint** constraintsPtr = island->constraintArray.size() ? &island->constraintArray[ 0 ] : NULL;
callback->processIsland( &island->bodyArray[ 0 ],
island->bodyArray.size(),
manifolds,
island->manifoldArray.size(),
constraintsPtr,
island->constraintArray.size(),
island->id
);
}
}
};
void btSimulationIslandManagerMt::parallelIslandDispatch( btAlignedObjectArray<Island*>* islandsPtr, IslandCallback* callback )
{
BT_PROFILE( "parallelIslandDispatch" );
int grainSize = 1; // iterations per task
UpdateIslandDispatcher dispatcher;
dispatcher.islandsPtr = islandsPtr;
dispatcher.callback = callback;
btParallelFor( 0, islandsPtr->size(), grainSize, dispatcher );
}
///@todo: this is random access, it can be walked 'cache friendly'!
void btSimulationIslandManagerMt::buildAndProcessIslands( btDispatcher* dispatcher,
btCollisionWorld* collisionWorld,

View File

@@ -59,7 +59,8 @@ public:
) = 0;
};
typedef void( *IslandDispatchFunc ) ( btAlignedObjectArray<Island*>* islands, IslandCallback* callback );
-static void defaultIslandDispatch( btAlignedObjectArray<Island*>* islands, IslandCallback* callback );
+static void serialIslandDispatch( btAlignedObjectArray<Island*>* islands, IslandCallback* callback );
+static void parallelIslandDispatch( btAlignedObjectArray<Island*>* islandsPtr, IslandCallback* callback );
protected:
btAlignedObjectArray<Island*> m_allocatedIslands; // owner of all Islands
btAlignedObjectArray<Island*> m_activeIslands; // islands actively in use

View File

@@ -14,6 +14,254 @@ subject to the following restrictions:
#include "btThreads.h" #include "btThreads.h"
#include "btQuickprof.h"
#include <algorithm> // for min and max
#if BT_THREADSAFE
#if BT_USE_OPENMP
#include <omp.h>
#endif // #if BT_USE_OPENMP
#if BT_USE_PPL
// use Microsoft Parallel Patterns Library (installed with Visual Studio 2010 and later)
#include <ppl.h> // if you get a compile error here, check whether your version of Visual Studio includes PPL
// Visual Studio 2010 and later should come with it
#include <concrtrm.h> // for GetProcessorCount()
#endif // #if BT_USE_PPL
#if BT_USE_TBB
// use Intel Threading Building Blocks for thread management
#define __TBB_NO_IMPLICIT_LINKAGE 1
#include <tbb/tbb.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#endif // #if BT_USE_TBB
static btITaskScheduler* gBtTaskScheduler;
static int gThreadsRunningCounter = 0; // useful for detecting if we are trying to do nested parallel-for calls
static btSpinMutex gThreadsRunningCounterMutex;
void btPushThreadsAreRunning()
{
gThreadsRunningCounterMutex.lock();
gThreadsRunningCounter++;
gThreadsRunningCounterMutex.unlock();
}
void btPopThreadsAreRunning()
{
gThreadsRunningCounterMutex.lock();
gThreadsRunningCounter--;
gThreadsRunningCounterMutex.unlock();
}
bool btThreadsAreRunning()
{
return gThreadsRunningCounter != 0;
}
void btSetTaskScheduler( btITaskScheduler* ts )
{
gBtTaskScheduler = ts;
}
btITaskScheduler* btGetTaskScheduler()
{
return gBtTaskScheduler;
}
void btParallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body )
{
gBtTaskScheduler->parallelFor( iBegin, iEnd, grainSize, body );
}
#if BT_USE_OPENMP
///
/// btTaskSchedulerOpenMP -- OpenMP task scheduler implementation
///
class btTaskSchedulerOpenMP : public btITaskScheduler
{
int m_numThreads;
public:
btTaskSchedulerOpenMP() : btITaskScheduler( "OpenMP" )
{
m_numThreads = 0;
}
virtual int getMaxNumThreads() const BT_OVERRIDE
{
return omp_get_max_threads();
}
virtual int getNumThreads() const BT_OVERRIDE
{
return m_numThreads;
}
virtual void setNumThreads( int numThreads ) BT_OVERRIDE
{
m_numThreads = ( std::max )( 1, numThreads );
omp_set_num_threads( m_numThreads );
}
virtual void parallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body ) BT_OVERRIDE
{
BT_PROFILE( "parallelFor_OpenMP" );
btPushThreadsAreRunning();
#pragma omp parallel for schedule( static, 1 )
for ( int i = iBegin; i < iEnd; i += grainSize )
{
BT_PROFILE( "OpenMP_job" );
body.forLoop( i, ( std::min )( i + grainSize, iEnd ) );
}
btPopThreadsAreRunning();
}
};
#endif // #if BT_USE_OPENMP
#if BT_USE_TBB
///
/// btTaskSchedulerTBB -- task scheduler implemented via Intel Threaded Building Blocks
///
class btTaskSchedulerTBB : public btITaskScheduler
{
int m_numThreads;
tbb::task_scheduler_init* m_tbbSchedulerInit;
public:
btTaskSchedulerTBB() : btITaskScheduler( "IntelTBB" )
{
m_numThreads = 0;
m_tbbSchedulerInit = NULL;
}
~btTaskSchedulerTBB()
{
if ( m_tbbSchedulerInit )
{
delete m_tbbSchedulerInit;
m_tbbSchedulerInit = NULL;
}
}
virtual int getMaxNumThreads() const BT_OVERRIDE
{
return tbb::task_scheduler_init::default_num_threads();
}
virtual int getNumThreads() const BT_OVERRIDE
{
return m_numThreads;
}
virtual void setNumThreads( int numThreads ) BT_OVERRIDE
{
m_numThreads = ( std::max )( 1, numThreads );
if ( m_tbbSchedulerInit )
{
delete m_tbbSchedulerInit;
m_tbbSchedulerInit = NULL;
}
m_tbbSchedulerInit = new tbb::task_scheduler_init( m_numThreads );
}
struct BodyAdapter
{
const btIParallelForBody* mBody;
void operator()( const tbb::blocked_range<int>& range ) const
{
BT_PROFILE( "TBB_job" );
mBody->forLoop( range.begin(), range.end() );
}
};
virtual void parallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body ) BT_OVERRIDE
{
BT_PROFILE( "parallelFor_TBB" );
// TBB dispatch
BodyAdapter tbbBody;
tbbBody.mBody = &body;
btPushThreadsAreRunning();
tbb::parallel_for( tbb::blocked_range<int>( iBegin, iEnd, grainSize ),
tbbBody,
tbb::simple_partitioner()
);
btPopThreadsAreRunning();
}
};
#endif // #if BT_USE_TBB
#if BT_USE_PPL
///
/// btTaskSchedulerPPL -- task scheduler implemented via Microsoft Parallel Patterns Lib
///
class btTaskSchedulerPPL : public btITaskScheduler
{
int m_numThreads;
public:
btTaskSchedulerPPL() : btITaskScheduler( "PPL" )
{
m_numThreads = 0;
}
virtual int getMaxNumThreads() const BT_OVERRIDE
{
return concurrency::GetProcessorCount();
}
virtual int getNumThreads() const BT_OVERRIDE
{
return m_numThreads;
}
virtual void setNumThreads( int numThreads ) BT_OVERRIDE
{
m_numThreads = ( std::max )( 1, numThreads );
using namespace concurrency;
if ( CurrentScheduler::Id() != -1 )
{
CurrentScheduler::Detach();
}
SchedulerPolicy policy;
policy.SetConcurrencyLimits( m_numThreads, m_numThreads );
CurrentScheduler::Create( policy );
}
struct BodyAdapter
{
const btIParallelForBody* mBody;
int mGrainSize;
int mIndexEnd;
void operator()( int i ) const
{
BT_PROFILE( "PPL_job" );
mBody->forLoop( i, ( std::min )( i + mGrainSize, mIndexEnd ) );
}
};
virtual void parallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body ) BT_OVERRIDE
{
BT_PROFILE( "parallelFor_PPL" );
// PPL dispatch
BodyAdapter pplBody;
pplBody.mBody = &body;
pplBody.mGrainSize = grainSize;
pplBody.mIndexEnd = iEnd;
btPushThreadsAreRunning();
// note: MSVC 2010 doesn't support partitioner args, so avoid them
concurrency::parallel_for( iBegin,
iEnd,
grainSize,
pplBody
);
btPopThreadsAreRunning();
}
};
#endif // #if BT_USE_PPL
//
// Lightweight spin-mutex based on atomics
@@ -22,8 +270,6 @@ subject to the following restrictions:
// context switching.
//
-#if BT_THREADSAFE
#if __cplusplus >= 201103L
// for anything claiming full C++11 compliance, use C++11 atomics
@@ -226,6 +472,74 @@ bool btSpinMutex::tryLock()
return true;
}
// non-parallel version of btParallelFor
void btParallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body )
{
btAssert(!"called btParallelFor in non-threadsafe build. enable BT_THREADSAFE");
body.forLoop( iBegin, iEnd );
}
#endif // #if BT_THREADSAFE
///
/// btTaskSchedulerSequential -- non-threaded implementation of task scheduler
/// (fallback in case no multi-threaded schedulers are available)
///
class btTaskSchedulerSequential : public btITaskScheduler
{
public:
btTaskSchedulerSequential() : btITaskScheduler( "Sequential" ) {}
virtual int getMaxNumThreads() const BT_OVERRIDE { return 1; }
virtual int getNumThreads() const BT_OVERRIDE { return 1; }
virtual void setNumThreads( int numThreads ) BT_OVERRIDE {}
virtual void parallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body ) BT_OVERRIDE
{
BT_PROFILE( "parallelFor_sequential" );
body.forLoop( iBegin, iEnd );
}
};
// create a non-threaded task scheduler (always available)
btITaskScheduler* btGetSequentialTaskScheduler()
{
static btTaskSchedulerSequential sTaskScheduler;
return &sTaskScheduler;
}
// create an OpenMP task scheduler (if available, otherwise returns null)
btITaskScheduler* btGetOpenMPTaskScheduler()
{
#if BT_USE_OPENMP && BT_THREADSAFE
static btTaskSchedulerOpenMP sTaskScheduler;
return &sTaskScheduler;
#else
return NULL;
#endif
}
// create an Intel TBB task scheduler (if available, otherwise returns null)
btITaskScheduler* btGetTBBTaskScheduler()
{
#if BT_USE_TBB && BT_THREADSAFE
static btTaskSchedulerTBB sTaskScheduler;
return &sTaskScheduler;
#else
return NULL;
#endif
}
// create a PPL task scheduler (if available, otherwise returns null)
btITaskScheduler* btGetPPLTaskScheduler()
{
#if BT_USE_PPL && BT_THREADSAFE
static btTaskSchedulerPPL sTaskScheduler;
return &sTaskScheduler;
#else
return NULL;
#endif
}
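The accessors above make scheduler choice a runtime decision; a sketch of one plausible priority order (the ordering is an application preference, nothing in this commit mandates it):

btITaskScheduler* pickBestTaskScheduler()
{
    // each getter returns NULL when its backend wasn't compiled in
    btITaskScheduler* ts = btGetOpenMPTaskScheduler();
    if ( !ts ) ts = btGetTBBTaskScheduler();
    if ( !ts ) ts = btGetPPLTaskScheduler();
    if ( !ts ) ts = btGetSequentialTaskScheduler(); // never NULL
    return ts;
}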

View File

@@ -19,6 +19,15 @@ subject to the following restrictions:
#include "btScalar.h" // has definitions like SIMD_FORCE_INLINE #include "btScalar.h" // has definitions like SIMD_FORCE_INLINE
#if defined (_MSC_VER) && _MSC_VER >= 1600
// give us a compile error if the signature of an overridden method is changed
#define BT_OVERRIDE override
#endif
#ifndef BT_OVERRIDE
#define BT_OVERRIDE
#endif
///
/// btSpinMutex -- lightweight spin-mutex implemented with atomic ops, never puts
/// a thread to sleep because it is designed to be used with a task scheduler
@@ -59,6 +68,7 @@ SIMD_FORCE_INLINE bool btMutexTryLock( btSpinMutex* mutex )
// for internal use only
bool btIsMainThread();
+bool btThreadsAreRunning();
unsigned int btGetCurrentThreadIndex();
const unsigned int BT_MAX_THREAD_COUNT = 64;
@@ -71,5 +81,55 @@ SIMD_FORCE_INLINE void btMutexUnlock( btSpinMutex* ) {}
SIMD_FORCE_INLINE bool btMutexTryLock( btSpinMutex* ) {return true;}
#endif
//
// btIParallelForBody -- subclass this to express work that can be done in parallel
//
class btIParallelForBody
{
public:
virtual void forLoop( int iBegin, int iEnd ) const = 0;
};
//
// btITaskScheduler -- subclass this to implement a task scheduler that can dispatch work to
// worker threads
//
class btITaskScheduler
{
const char* m_name;
public:
btITaskScheduler( const char* name ) : m_name( name ) {}
const char* getName() const { return m_name; }
virtual ~btITaskScheduler() {}
virtual int getMaxNumThreads() const = 0;
virtual int getNumThreads() const = 0;
virtual void setNumThreads( int numThreads ) = 0;
virtual void parallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body ) = 0;
};
// set the task scheduler to use for all calls to btParallelFor()
// NOTE: you must set this prior to using any of the multi-threaded "Mt" classes
void btSetTaskScheduler( btITaskScheduler* ts );
// get the current task scheduler
btITaskScheduler* btGetTaskScheduler();
// get non-threaded task scheduler (always available)
btITaskScheduler* btGetSequentialTaskScheduler();
// get OpenMP task scheduler (if available, otherwise returns null)
btITaskScheduler* btGetOpenMPTaskScheduler();
// get Intel TBB task scheduler (if available, otherwise returns null)
btITaskScheduler* btGetTBBTaskScheduler();
// get PPL task scheduler (if available, otherwise returns null)
btITaskScheduler* btGetPPLTaskScheduler();
// btParallelFor -- call this to dispatch work like a for-loop
// (iterations may be done out of order, so no dependencies are allowed)
void btParallelFor( int iBegin, int iEnd, int grainSize, const btIParallelForBody& body );
#endif //BT_THREADS_H
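To round out the header, a self-contained sketch of the btIParallelForBody / btParallelFor contract declared above (ScaleBody, its data, and the grain size are illustrative only):

#include "LinearMath/btThreads.h"

struct ScaleBody : public btIParallelForBody
{
    btScalar* values;
    btScalar factor;
    // called concurrently on disjoint [iBegin, iEnd) ranges; iterations must be independent
    virtual void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
    {
        for ( int i = iBegin; i < iEnd; ++i )
        {
            values[ i ] *= factor;
        }
    }
};

void scaleAll( btScalar* values, int count, btScalar factor )
{
    ScaleBody body;
    body.values = values;
    body.factor = factor;
    btParallelFor( 0, count, 64, body ); // grain size of 64 iterations per task
}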