rename gpu_sat -> gpu_narrowphase
1578  opencl/gpu_narrowphase/host/ConvexHullContact.cpp  Normal file
File diff suppressed because it is too large
97  opencl/gpu_narrowphase/host/ConvexHullContact.h  Normal file
@@ -0,0 +1,97 @@

#ifndef _CONVEX_HULL_CONTACT_H
#define _CONVEX_HULL_CONTACT_H

#include "parallel_primitives/host/btOpenCLArray.h"
#include "btRigidBodyCL.h"
#include "BulletCommon/btAlignedObjectArray.h"
#include "btConvexUtility.h"
#include "btConvexPolyhedronCL.h"
#include "btCollidable.h"
#include "btContact4.h"
#include "parallel_primitives/host/btInt2.h"
#include "parallel_primitives/host/btInt4.h"
#include "btOptimizedBvh.h"

//#include "../../dynamics/basic_demo/Stubs/ChNarrowPhase.h"


struct btYetAnotherAabb
{
	union
	{
		float m_min[4];
		int m_minIndices[4];
	};
	union
	{
		float m_max[4];
		//int m_signedMaxIndices[4];
		//unsigned int m_unsignedMaxIndices[4];
	};
};

struct GpuSatCollision
{
	cl_context m_context;
	cl_device_id m_device;
	cl_command_queue m_queue;
	cl_kernel m_findSeparatingAxisKernel;
	cl_kernel m_findConcaveSeparatingAxisKernel;
	cl_kernel m_findCompoundPairsKernel;
	cl_kernel m_processCompoundPairsKernel;

	cl_kernel m_clipHullHullKernel;
	cl_kernel m_clipCompoundsHullHullKernel;

	cl_kernel m_clipFacesAndContactReductionKernel;
	cl_kernel m_findClippingFacesKernel;

	cl_kernel m_clipHullHullConcaveConvexKernel;
	cl_kernel m_extractManifoldAndAddContactKernel;
	cl_kernel m_newContactReductionKernel;

	cl_kernel m_bvhTraversalKernel;
	cl_kernel m_primitiveContactsKernel;
	cl_kernel m_findConcaveSphereContactsKernel;

	cl_kernel m_processCompoundPairsPrimitivesKernel;


	btOpenCLArray<int> m_totalContactsOut;

	GpuSatCollision(cl_context ctx,cl_device_id device, cl_command_queue q );
	virtual ~GpuSatCollision();


	void computeConvexConvexContactsGPUSAT( const btOpenCLArray<btInt2>* pairs, int nPairs,
		const btOpenCLArray<btRigidBodyCL>* bodyBuf,
		btOpenCLArray<btContact4>* contactOut, int& nContacts,
		int maxContactCapacity,
		const btOpenCLArray<btConvexPolyhedronCL>& hostConvexData,
		const btOpenCLArray<btVector3>& vertices,
		const btOpenCLArray<btVector3>& uniqueEdges,
		const btOpenCLArray<btGpuFace>& faces,
		const btOpenCLArray<int>& indices,
		const btOpenCLArray<btCollidable>& gpuCollidables,
		const btOpenCLArray<btGpuChildShape>& gpuChildShapes,

		const btOpenCLArray<btYetAnotherAabb>& clAabbs,
		btOpenCLArray<btVector3>& worldVertsB1GPU,
		btOpenCLArray<btInt4>& clippingFacesOutGPU,
		btOpenCLArray<btVector3>& worldNormalsAGPU,
		btOpenCLArray<btVector3>& worldVertsA1GPU,
		btOpenCLArray<btVector3>& worldVertsB2GPU,
		btAlignedObjectArray<class btOptimizedBvh*>& bvhData,
		btOpenCLArray<btQuantizedBvhNode>* treeNodesGPU,
		btOpenCLArray<btBvhSubtreeInfo>* subTreesGPU,
		int numObjects,
		int maxTriConvexPairCapacity,
		btOpenCLArray<btInt4>& triangleConvexPairs,
		int& numTriConvexPairsOut
		);


};

#endif //_CONVEX_HULL_CONTACT_H
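GpuSatCollision above is the host-side entry point of the GPU narrowphase. A minimal construction sketch, assuming the OpenCL context, device and queue already exist (e.g. from the parallel_primitives OpenCL utilities) and that the header is on the include path; the per-frame call is only hinted at in a comment because it takes the full set of GPU buffers listed in the declaration.

#include "opencl/gpu_narrowphase/host/ConvexHullContact.h"  // path is an assumption

// Hypothetical helper: attach the SAT narrowphase to an existing OpenCL setup.
GpuSatCollision* createNarrowphase(cl_context ctx, cl_device_id device, cl_command_queue queue)
{
	// The constructor presumably builds the OpenCL programs and fills in the
	// cl_kernel members declared above.
	GpuSatCollision* sat = new GpuSatCollision(ctx, device, queue);

	// Per frame, computeConvexConvexContactsGPUSAT() is then fed the broadphase
	// pair array plus the shared vertex/edge/face pools, and it appends
	// btContact4 entries to contactOut.
	return sat;
}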
50  opencl/gpu_narrowphase/host/btCollidable.h  Normal file
@@ -0,0 +1,50 @@

#ifndef BT_COLLIDABLE_H
#define BT_COLLIDABLE_H

enum btShapeTypes
{
	SHAPE_HEIGHT_FIELD=1,

	SHAPE_CONVEX_HULL=3,
	SHAPE_PLANE=4,
	SHAPE_CONCAVE_TRIMESH=5,
	SHAPE_COMPOUND_OF_CONVEX_HULLS=6,
	SHAPE_SPHERE=7,
	MAX_NUM_SHAPE_TYPES,
};

struct btCollidable
{
	int m_numChildShapes;
	float m_radius;
	int m_shapeType;
	int m_shapeIndex;
};

struct btCollidableNew
{
	short int m_shapeType;
	short int m_numShapes;
	int m_shapeIndex;
};

struct btGpuChildShape
{
	float m_childPosition[4];
	float m_childOrientation[4];
	int m_shapeIndex;
	int m_unused0;
	int m_unused1;
	int m_unused2;
};

struct btCompoundOverlappingPair
{
	int m_bodyIndexA;
	int m_bodyIndexB;
//	int m_pairType;
	int m_childShapeIndexA;
	int m_childShapeIndexB;
};
#endif //BT_COLLIDABLE_H
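A small sketch of how a btCollidable is typically filled in for the shape types above; the index values are placeholders and would normally point into the registered convex-hull and child-shape arrays, so treat them as assumptions.

#include "opencl/gpu_narrowphase/host/btCollidable.h"  // path is an assumption

// Hypothetical registration of one convex hull and one compound collidable.
void fillExampleCollidables(btCollidable& hull, btCollidable& compound)
{
	hull.m_shapeType = SHAPE_CONVEX_HULL;
	hull.m_shapeIndex = 0;        // index into the btConvexPolyhedronCL array
	hull.m_numChildShapes = 0;
	hull.m_radius = 0.f;

	compound.m_shapeType = SHAPE_COMPOUND_OF_CONVEX_HULLS;
	compound.m_shapeIndex = 0;    // index of its first btGpuChildShape
	compound.m_numChildShapes = 2;
	compound.m_radius = 0.f;
}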
42  opencl/gpu_narrowphase/host/btContact4.h  Normal file
@@ -0,0 +1,42 @@
#ifndef BT_CONTACT4_H
#define BT_CONTACT4_H

#include "BulletCommon/btVector3.h"


ATTRIBUTE_ALIGNED16(struct) btContact4
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	btVector3 m_worldPos[4];
	btVector3 m_worldNormal;
	// float m_restituitionCoeff;
	// float m_frictionCoeff;
	unsigned short m_restituitionCoeffCmp;
	unsigned short m_frictionCoeffCmp;
	int m_batchIdx;

	int m_bodyAPtrAndSignBit;
	int m_bodyBPtrAndSignBit;

	int getBodyA()const {return abs(m_bodyAPtrAndSignBit);}
	int getBodyB()const {return abs(m_bodyBPtrAndSignBit);}
	bool isBodyAFixed()const { return m_bodyAPtrAndSignBit<0;}
	bool isBodyBFixed()const { return m_bodyBPtrAndSignBit<0;}
	// todo. make it safer
	int& getBatchIdx() { return m_batchIdx; }
	const int& getBatchIdx() const { return m_batchIdx; }
	float getRestituitionCoeff() const { return ((float)m_restituitionCoeffCmp/(float)0xffff); }
	void setRestituitionCoeff( float c ) { btAssert( c >= 0.f && c <= 1.f ); m_restituitionCoeffCmp = (unsigned short)(c*0xffff); }
	float getFrictionCoeff() const { return ((float)m_frictionCoeffCmp/(float)0xffff); }
	void setFrictionCoeff( float c ) { btAssert( c >= 0.f && c <= 1.f ); m_frictionCoeffCmp = (unsigned short)(c*0xffff); }

	float& getNPoints() { return m_worldNormal[3]; }
	float getNPoints() const { return m_worldNormal[3]; }

	float getPenetration(int idx) const { return m_worldPos[idx][3]; }

	bool isInvalid() const { return (getBodyA()==0 || getBodyB()==0); }
};

#endif //BT_CONTACT4_H
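The restitution and friction coefficients above are stored compressed as 16-bit integers. A standalone sketch of that round trip (plain C++, independent of the Bullet headers) shows the quantization error stays within one step of 1/0xffff:

#include <cassert>
#include <cmath>

int main()
{
	float c = 0.5f;                                        // coefficient in [0,1]
	unsigned short cmp = (unsigned short)(c * 0xffff);     // same math as setRestituitionCoeff()
	float back = (float)cmp / (float)0xffff;               // same math as getRestituitionCoeff()
	assert(std::fabs(back - c) <= 1.0f / 0xffff);          // at most one quantization step off
	return 0;
}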
64  opencl/gpu_narrowphase/host/btConvexPolyhedronCL.h  Normal file
@@ -0,0 +1,64 @@
#ifndef CONVEX_POLYHEDRON_CL
#define CONVEX_POLYHEDRON_CL

#include "BulletCommon/btTransform.h"

struct btGpuFace
{
	btVector4 m_plane;
	int m_indexOffset;
	int m_numIndices;
};

ATTRIBUTE_ALIGNED16(struct) btConvexPolyhedronCL
{
	btVector3 m_localCenter;
	btVector3 m_extents;
	btVector3 mC;
	btVector3 mE;

	btScalar m_radius;
	int m_faceOffset;
	int m_numFaces;
	int m_numVertices;

	int m_vertexOffset;
	int m_uniqueEdgesOffset;
	int m_numUniqueEdges;
	int m_unused;



	inline void project(const btTransform& trans, const btVector3& dir, const btAlignedObjectArray<btVector3>& vertices, btScalar& min, btScalar& max) const
	{
		min = FLT_MAX;
		max = -FLT_MAX;
		int numVerts = m_numVertices;

		const btVector3 localDir = trans.getBasis().transpose()*dir;
		const btVector3 localDi2 = quatRotate(trans.getRotation().inverse(),dir);

		btScalar offset = trans.getOrigin().dot(dir);

		for(int i=0;i<numVerts;i++)
		{
			//btVector3 pt = trans * vertices[m_vertexOffset+i];
			//btScalar dp = pt.dot(dir);
			btScalar dp = vertices[m_vertexOffset+i].dot(localDir);
			//btAssert(dp==dpL);
			if(dp < min) min = dp;
			if(dp > max) max = dp;
		}
		if(min>max)
		{
			btScalar tmp = min;
			min = max;
			max = tmp;
		}
		min += offset;
		max += offset;
	}

};

#endif //CONVEX_POLYHEDRON_CL
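project() above computes the interval a hull covers along a world-space direction. A sketch of the single-axis overlap test a SAT pass would build on top of it; the shared vertex pool and the two transforms are assumed to come from the caller, and the include path is an assumption.

#include "opencl/gpu_narrowphase/host/btConvexPolyhedronCL.h"

// Hypothetical helper: do the projected intervals of two hulls overlap on one axis?
static bool overlapOnAxis(const btConvexPolyhedronCL& hullA, const btTransform& transA,
	const btConvexPolyhedronCL& hullB, const btTransform& transB,
	const btVector3& worldAxis,
	const btAlignedObjectArray<btVector3>& vertexPool)
{
	btScalar minA, maxA, minB, maxB;
	hullA.project(transA, worldAxis, vertexPool, minA, maxA);
	hullB.project(transB, worldAxis, vertexPool, minB, maxB);
	return maxA >= minB && maxB >= minA;   // a gap on any axis means no contact
}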
520  opencl/gpu_narrowphase/host/btConvexUtility.cpp  Normal file
@@ -0,0 +1,520 @@
/*
Copyright (c) 2012 Advanced Micro Devices, Inc.

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
//Originally written by Erwin Coumans


#include "btConvexUtility.h"
#include "BulletGeometry/btConvexHullComputer.h"
#include "BulletGeometry/btGrahamScan2dConvexHull.h"
#include "BulletCommon/btQuaternion.h"
#include "BulletCommon/btHashMap.h"

#include "btConvexPolyhedronCL.h"



btConvexUtility::~btConvexUtility()
{
}

bool btConvexUtility::initializePolyhedralFeatures(const btVector3* orgVertices, int numPoints, bool mergeCoplanarTriangles)
{



	btConvexHullComputer conv;
	conv.compute(&orgVertices[0].getX(), sizeof(btVector3),numPoints,0.f,0.f);

	btAlignedObjectArray<btVector3> faceNormals;
	int numFaces = conv.faces.size();
	faceNormals.resize(numFaces);
	btConvexHullComputer* convexUtil = &conv;


	btAlignedObjectArray<btMyFace> tmpFaces;
	tmpFaces.resize(numFaces);

	int numVertices = convexUtil->vertices.size();
	m_vertices.resize(numVertices);
	for (int p=0;p<numVertices;p++)
	{
		m_vertices[p] = convexUtil->vertices[p];
	}


	for (int i=0;i<numFaces;i++)
	{
		int face = convexUtil->faces[i];
		//printf("face=%d\n",face);
		const btConvexHullComputer::Edge* firstEdge = &convexUtil->edges[face];
		const btConvexHullComputer::Edge* edge = firstEdge;

		btVector3 edges[3];
		int numEdges = 0;
		//compute face normals

		do
		{

			int src = edge->getSourceVertex();
			tmpFaces[i].m_indices.push_back(src);
			int targ = edge->getTargetVertex();
			btVector3 wa = convexUtil->vertices[src];

			btVector3 wb = convexUtil->vertices[targ];
			btVector3 newEdge = wb-wa;
			newEdge.normalize();
			if (numEdges<2)
				edges[numEdges++] = newEdge;

			edge = edge->getNextEdgeOfFace();
		} while (edge!=firstEdge);

		btScalar planeEq = 1e30f;


		if (numEdges==2)
		{
			faceNormals[i] = edges[0].cross(edges[1]);
			faceNormals[i].normalize();
			tmpFaces[i].m_plane[0] = faceNormals[i].getX();
			tmpFaces[i].m_plane[1] = faceNormals[i].getY();
			tmpFaces[i].m_plane[2] = faceNormals[i].getZ();
			tmpFaces[i].m_plane[3] = planeEq;

		}
		else
		{
			btAssert(0);//degenerate?
			faceNormals[i].setZero();
		}

		for (int v=0;v<tmpFaces[i].m_indices.size();v++)
		{
			btScalar eq = m_vertices[tmpFaces[i].m_indices[v]].dot(faceNormals[i]);
			if (planeEq>eq)
			{
				planeEq=eq;
			}
		}
		tmpFaces[i].m_plane[3] = -planeEq;
	}

	//merge coplanar faces and copy them to m_polyhedron

	btScalar faceWeldThreshold= 0.999f;
	btAlignedObjectArray<int> todoFaces;
	for (int i=0;i<tmpFaces.size();i++)
		todoFaces.push_back(i);

	while (todoFaces.size())
	{
		btAlignedObjectArray<int> coplanarFaceGroup;
		int refFace = todoFaces[todoFaces.size()-1];

		coplanarFaceGroup.push_back(refFace);
		btMyFace& faceA = tmpFaces[refFace];
		todoFaces.pop_back();

		btVector3 faceNormalA(faceA.m_plane[0],faceA.m_plane[1],faceA.m_plane[2]);
		for (int j=todoFaces.size()-1;j>=0;j--)
		{
			int i = todoFaces[j];
			btMyFace& faceB = tmpFaces[i];
			btVector3 faceNormalB(faceB.m_plane[0],faceB.m_plane[1],faceB.m_plane[2]);
			if (faceNormalA.dot(faceNormalB)>faceWeldThreshold)
			{
				coplanarFaceGroup.push_back(i);
				todoFaces.remove(i);
			}
		}


		bool did_merge = false;
		if (coplanarFaceGroup.size()>1)
		{
			//do the merge: use Graham Scan 2d convex hull

			btAlignedObjectArray<GrahamVector3> orgpoints;
			btVector3 averageFaceNormal(0,0,0);

			for (int i=0;i<coplanarFaceGroup.size();i++)
			{
				// m_polyhedron->m_faces.push_back(tmpFaces[coplanarFaceGroup[i]]);

				btMyFace& face = tmpFaces[coplanarFaceGroup[i]];
				btVector3 faceNormal(face.m_plane[0],face.m_plane[1],face.m_plane[2]);
				averageFaceNormal+=faceNormal;
				for (int f=0;f<face.m_indices.size();f++)
				{
					int orgIndex = face.m_indices[f];
					btVector3 pt = m_vertices[orgIndex];

					bool found = false;

					for (int i=0;i<orgpoints.size();i++)
					{
						//if ((orgpoints[i].m_orgIndex == orgIndex) || ((rotatedPt-orgpoints[i]).length2()<0.0001))
						if (orgpoints[i].m_orgIndex == orgIndex)
						{
							found=true;
							break;
						}
					}
					if (!found)
						orgpoints.push_back(GrahamVector3(pt,orgIndex));
				}
			}



			btMyFace combinedFace;
			for (int i=0;i<4;i++)
				combinedFace.m_plane[i] = tmpFaces[coplanarFaceGroup[0]].m_plane[i];

			btAlignedObjectArray<GrahamVector3> hull;

			averageFaceNormal.normalize();
			GrahamScanConvexHull2D(orgpoints,hull,averageFaceNormal);

			for (int i=0;i<hull.size();i++)
			{
				combinedFace.m_indices.push_back(hull[i].m_orgIndex);
				for(int k = 0; k < orgpoints.size(); k++)
				{
					if(orgpoints[k].m_orgIndex == hull[i].m_orgIndex)
					{
						orgpoints[k].m_orgIndex = -1; // invalidate...
						break;
					}
				}
			}

			// are there rejected vertices?
			bool reject_merge = false;



			for(int i = 0; i < orgpoints.size(); i++) {
				if(orgpoints[i].m_orgIndex == -1)
					continue; // this is in the hull...
				// this vertex is rejected -- is anybody else using this vertex?
				for(int j = 0; j < tmpFaces.size(); j++) {

					btMyFace& face = tmpFaces[j];
					// is this a face of the current coplanar group?
					bool is_in_current_group = false;
					for(int k = 0; k < coplanarFaceGroup.size(); k++) {
						if(coplanarFaceGroup[k] == j) {
							is_in_current_group = true;
							break;
						}
					}
					if(is_in_current_group) // ignore this face...
						continue;
					// does this face use this rejected vertex?
					for(int v = 0; v < face.m_indices.size(); v++) {
						if(face.m_indices[v] == orgpoints[i].m_orgIndex) {
							// this rejected vertex is used in another face -- reject merge
							reject_merge = true;
							break;
						}
					}
					if(reject_merge)
						break;
				}
				if(reject_merge)
					break;
			}

			if (!reject_merge)
			{
				// do this merge!
				did_merge = true;
				m_faces.push_back(combinedFace);
			}
		}
		if(!did_merge)
		{
			for (int i=0;i<coplanarFaceGroup.size();i++)
			{
				btMyFace face = tmpFaces[coplanarFaceGroup[i]];
				m_faces.push_back(face);
			}

		}



	}

	initialize();

	return true;
}






inline bool IsAlmostZero(const btVector3& v)
{
	if(fabsf(v.getX())>1e-6 || fabsf(v.getY())>1e-6 || fabsf(v.getZ())>1e-6) return false;
	return true;
}

struct btInternalVertexPair
{
	btInternalVertexPair(short int v0,short int v1)
		:m_v0(v0),
		m_v1(v1)
	{
		if (m_v1>m_v0)
			btSwap(m_v0,m_v1);
	}
	short int m_v0;
	short int m_v1;
	int getHash() const
	{
		return m_v0+(m_v1<<16);
	}
	bool equals(const btInternalVertexPair& other) const
	{
		return m_v0==other.m_v0 && m_v1==other.m_v1;
	}
};

struct btInternalEdge
{
	btInternalEdge()
		:m_face0(-1),
		m_face1(-1)
	{
	}
	short int m_face0;
	short int m_face1;
};

//

#ifdef TEST_INTERNAL_OBJECTS
bool btConvexUtility::testContainment() const
{
	for(int p=0;p<8;p++)
	{
		btVector3 LocalPt;
		if(p==0) LocalPt = m_localCenter + btVector3(m_extents[0], m_extents[1], m_extents[2]);
		else if(p==1) LocalPt = m_localCenter + btVector3(m_extents[0], m_extents[1], -m_extents[2]);
		else if(p==2) LocalPt = m_localCenter + btVector3(m_extents[0], -m_extents[1], m_extents[2]);
		else if(p==3) LocalPt = m_localCenter + btVector3(m_extents[0], -m_extents[1], -m_extents[2]);
		else if(p==4) LocalPt = m_localCenter + btVector3(-m_extents[0], m_extents[1], m_extents[2]);
		else if(p==5) LocalPt = m_localCenter + btVector3(-m_extents[0], m_extents[1], -m_extents[2]);
		else if(p==6) LocalPt = m_localCenter + btVector3(-m_extents[0], -m_extents[1], m_extents[2]);
		else if(p==7) LocalPt = m_localCenter + btVector3(-m_extents[0], -m_extents[1], -m_extents[2]);

		for(int i=0;i<m_faces.size();i++)
		{
			const btVector3 Normal(m_faces[i].m_plane[0], m_faces[i].m_plane[1], m_faces[i].m_plane[2]);
			const btScalar d = LocalPt.dot(Normal) + m_faces[i].m_plane[3];
			if(d>0.0f)
				return false;
		}
	}
	return true;
}
#endif

void btConvexUtility::initialize()
{

	btHashMap<btInternalVertexPair,btInternalEdge> edges;

	btScalar TotalArea = 0.0f;

	m_localCenter.setValue(0, 0, 0);
	for(int i=0;i<m_faces.size();i++)
	{
		int numVertices = m_faces[i].m_indices.size();
		int NbTris = numVertices;
		for(int j=0;j<NbTris;j++)
		{
			int k = (j+1)%numVertices;
			btInternalVertexPair vp(m_faces[i].m_indices[j],m_faces[i].m_indices[k]);
			btInternalEdge* edptr = edges.find(vp);
			btVector3 edge = m_vertices[vp.m_v1]-m_vertices[vp.m_v0];
			edge.normalize();

			bool found = false;
			btVector3 diff,diff2;

			for (int p=0;p<m_uniqueEdges.size();p++)
			{
				diff = m_uniqueEdges[p]-edge;
				diff2 = m_uniqueEdges[p]+edge;

				// if ((diff.length2()==0.f) ||
				// (diff2.length2()==0.f))

				if (IsAlmostZero(diff) ||
					IsAlmostZero(diff2))
				{
					found = true;
					break;
				}
			}

			if (!found)
			{
				m_uniqueEdges.push_back(edge);
			}

			if (edptr)
			{
				//TBD: figure out why I added this assert
				// btAssert(edptr->m_face0>=0);
				// btAssert(edptr->m_face1<0);
				edptr->m_face1 = i;
			} else
			{
				btInternalEdge ed;
				ed.m_face0 = i;
				edges.insert(vp,ed);
			}
		}
	}

#ifdef USE_CONNECTED_FACES
	for(int i=0;i<m_faces.size();i++)
	{
		int numVertices = m_faces[i].m_indices.size();
		m_faces[i].m_connectedFaces.resize(numVertices);

		for(int j=0;j<numVertices;j++)
		{
			int k = (j+1)%numVertices;
			btInternalVertexPair vp(m_faces[i].m_indices[j],m_faces[i].m_indices[k]);
			btInternalEdge* edptr = edges.find(vp);
			btAssert(edptr);
			btAssert(edptr->m_face0>=0);
			btAssert(edptr->m_face1>=0);

			int connectedFace = (edptr->m_face0==i)?edptr->m_face1:edptr->m_face0;
			m_faces[i].m_connectedFaces[j] = connectedFace;
		}
	}
#endif//USE_CONNECTED_FACES

	for(int i=0;i<m_faces.size();i++)
	{
		int numVertices = m_faces[i].m_indices.size();
		int NbTris = numVertices-2;

		const btVector3& p0 = m_vertices[m_faces[i].m_indices[0]];
		for(int j=1;j<=NbTris;j++)
		{
			int k = (j+1)%numVertices;
			const btVector3& p1 = m_vertices[m_faces[i].m_indices[j]];
			const btVector3& p2 = m_vertices[m_faces[i].m_indices[k]];
			btScalar Area = ((p0 - p1).cross(p0 - p2)).length() * 0.5f;
			btVector3 Center = (p0+p1+p2)/3.0f;
			m_localCenter += Area * Center;
			TotalArea += Area;
		}
	}
	m_localCenter /= TotalArea;




#ifdef TEST_INTERNAL_OBJECTS
	if(1)
	{
		m_radius = FLT_MAX;
		for(int i=0;i<m_faces.size();i++)
		{
			const btVector3 Normal(m_faces[i].m_plane[0], m_faces[i].m_plane[1], m_faces[i].m_plane[2]);
			const btScalar dist = btFabs(m_localCenter.dot(Normal) + m_faces[i].m_plane[3]);
			if(dist<m_radius)
				m_radius = dist;
		}


		btScalar MinX = FLT_MAX;
		btScalar MinY = FLT_MAX;
		btScalar MinZ = FLT_MAX;
		btScalar MaxX = -FLT_MAX;
		btScalar MaxY = -FLT_MAX;
		btScalar MaxZ = -FLT_MAX;
		for(int i=0; i<m_vertices.size(); i++)
		{
			const btVector3& pt = m_vertices[i];
			if(pt.getX()<MinX) MinX = pt.getX();
			if(pt.getX()>MaxX) MaxX = pt.getX();
			if(pt.getY()<MinY) MinY = pt.getY();
			if(pt.getY()>MaxY) MaxY = pt.getY();
			if(pt.getZ()<MinZ) MinZ = pt.getZ();
			if(pt.getZ()>MaxZ) MaxZ = pt.getZ();
		}
		mC.setValue(MaxX+MinX, MaxY+MinY, MaxZ+MinZ);
		mE.setValue(MaxX-MinX, MaxY-MinY, MaxZ-MinZ);



		// const btScalar r = m_radius / sqrtf(2.0f);
		const btScalar r = m_radius / sqrtf(3.0f);
		const int LargestExtent = mE.maxAxis();
		const btScalar Step = (mE[LargestExtent]*0.5f - r)/1024.0f;
		m_extents[0] = m_extents[1] = m_extents[2] = r;
		m_extents[LargestExtent] = mE[LargestExtent]*0.5f;
		bool FoundBox = false;
		for(int j=0;j<1024;j++)
		{
			if(testContainment())
			{
				FoundBox = true;
				break;
			}

			m_extents[LargestExtent] -= Step;
		}
		if(!FoundBox)
		{
			m_extents[0] = m_extents[1] = m_extents[2] = r;
		}
		else
		{
			// Refine the box
			const btScalar Step = (m_radius - r)/1024.0f;
			const int e0 = (1<<LargestExtent) & 3;
			const int e1 = (1<<e0) & 3;

			for(int j=0;j<1024;j++)
			{
				const btScalar Saved0 = m_extents[e0];
				const btScalar Saved1 = m_extents[e1];
				m_extents[e0] += Step;
				m_extents[e1] += Step;

				if(!testContainment())
				{
					m_extents[e0] = Saved0;
					m_extents[e1] = Saved1;
					break;
				}
			}
		}
	}
#endif
}
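For reference, the faceWeldThreshold = 0.999f used in initializePolyhedralFeatures() is a cosine. A quick standalone check (plain C++/cmath, not part of the commit) shows the merge applies to faces whose normals differ by roughly 2.6 degrees or less:

#include <cmath>
#include <cstdio>

int main()
{
	const float faceWeldThreshold = 0.999f;
	// acos of the dot-product threshold gives the largest angle that still merges.
	const float maxAngleDeg = std::acos(faceWeldThreshold) * 180.0f / 3.14159265f;
	std::printf("coplanar faces are welded below ~%.2f degrees\n", maxAngleDeg);
	return 0;
}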
62  opencl/gpu_narrowphase/host/btConvexUtility.h  Normal file
@@ -0,0 +1,62 @@

/*
Copyright (c) 2012 Advanced Micro Devices, Inc.

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
//Originally written by Erwin Coumans

#ifndef _BT_CONVEX_UTILITY_H
#define _BT_CONVEX_UTILITY_H

#include "BulletCommon/btAlignedObjectArray.h"
#include "BulletCommon/btTransform.h"

#include "btConvexPolyhedronCL.h"


struct btMyFace
{
	btAlignedObjectArray<int> m_indices;
	btScalar m_plane[4];
};

ATTRIBUTE_ALIGNED16(class) btConvexUtility
{
public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

	btVector3 m_localCenter;
	btVector3 m_extents;
	btVector3 mC;
	btVector3 mE;
	btScalar m_radius;

	btAlignedObjectArray<btVector3> m_vertices;
	btAlignedObjectArray<btMyFace> m_faces;
	btAlignedObjectArray<btVector3> m_uniqueEdges;


	btConvexUtility()
	{
	}
	virtual ~btConvexUtility();

	bool initializePolyhedralFeatures(const btVector3* orgVertices, int numVertices, bool mergeCoplanarTriangles=true);

	void initialize();
	bool testContainment() const;



};
#endif

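A usage sketch for the class above, building the faces, unique edges and local center of a unit tetrahedron; the include path and the choice of shape are assumptions.

#include "opencl/gpu_narrowphase/host/btConvexUtility.h"  // path is an assumption

void buildTetrahedronFeatures(btConvexUtility& util)
{
	btVector3 points[4] = {
		btVector3(0, 0, 0), btVector3(1, 0, 0),
		btVector3(0, 1, 0), btVector3(0, 0, 1)
	};

	// Runs btConvexHullComputer, merges coplanar triangles and calls initialize(),
	// so m_vertices, m_faces, m_uniqueEdges and m_localCenter are populated afterwards.
	util.initializePolyhedralFeatures(points, 4);
}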
391  opencl/gpu_narrowphase/host/btOptimizedBvh.cpp  Normal file
@@ -0,0 +1,391 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/


#include "btOptimizedBvh.h"
#include "btStridingMeshInterface.h"
#include "BulletGeometry/btAabbUtil2.h"
#include "BulletCommon/btIDebugDraw.h"


btOptimizedBvh::btOptimizedBvh()
{
}

btOptimizedBvh::~btOptimizedBvh()
{
}


void btOptimizedBvh::build(btStridingMeshInterface* triangles, bool useQuantizedAabbCompression, const btVector3& bvhAabbMin, const btVector3& bvhAabbMax)
{
	m_useQuantization = useQuantizedAabbCompression;


	// NodeArray triangleNodes;

	struct NodeTriangleCallback : public btInternalTriangleIndexCallback
	{

		NodeArray& m_triangleNodes;

		NodeTriangleCallback& operator=(NodeTriangleCallback& other)
		{
			m_triangleNodes.copyFromArray(other.m_triangleNodes);
			return *this;
		}

		NodeTriangleCallback(NodeArray& triangleNodes)
			:m_triangleNodes(triangleNodes)
		{
		}

		virtual void internalProcessTriangleIndex(btVector3* triangle,int partId,int triangleIndex)
		{
			btOptimizedBvhNode node;
			btVector3 aabbMin,aabbMax;
			aabbMin.setValue(btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT));
			aabbMax.setValue(btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT));
			aabbMin.setMin(triangle[0]);
			aabbMax.setMax(triangle[0]);
			aabbMin.setMin(triangle[1]);
			aabbMax.setMax(triangle[1]);
			aabbMin.setMin(triangle[2]);
			aabbMax.setMax(triangle[2]);

			//with quantization?
			node.m_aabbMinOrg = aabbMin;
			node.m_aabbMaxOrg = aabbMax;

			node.m_escapeIndex = -1;

			//for child nodes
			node.m_subPart = partId;
			node.m_triangleIndex = triangleIndex;
			m_triangleNodes.push_back(node);
		}
	};
	struct QuantizedNodeTriangleCallback : public btInternalTriangleIndexCallback
	{
		QuantizedNodeArray& m_triangleNodes;
		const btQuantizedBvh* m_optimizedTree; // for quantization

		QuantizedNodeTriangleCallback& operator=(QuantizedNodeTriangleCallback& other)
		{
			m_triangleNodes.copyFromArray(other.m_triangleNodes);
			m_optimizedTree = other.m_optimizedTree;
			return *this;
		}

		QuantizedNodeTriangleCallback(QuantizedNodeArray& triangleNodes,const btQuantizedBvh* tree)
			:m_triangleNodes(triangleNodes),m_optimizedTree(tree)
		{
		}

		virtual void internalProcessTriangleIndex(btVector3* triangle,int partId,int triangleIndex)
		{
			// The partId and triangle index must fit in the same (positive) integer
			btAssert(partId < (1<<MAX_NUM_PARTS_IN_BITS));
			btAssert(triangleIndex < (1<<(31-MAX_NUM_PARTS_IN_BITS)));
			//negative indices are reserved for escapeIndex
			btAssert(triangleIndex>=0);

			btQuantizedBvhNode node;
			btVector3 aabbMin,aabbMax;
			aabbMin.setValue(btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT));
			aabbMax.setValue(btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT));
			aabbMin.setMin(triangle[0]);
			aabbMax.setMax(triangle[0]);
			aabbMin.setMin(triangle[1]);
			aabbMax.setMax(triangle[1]);
			aabbMin.setMin(triangle[2]);
			aabbMax.setMax(triangle[2]);

			//PCK: add these checks for zero dimensions of aabb
			const btScalar MIN_AABB_DIMENSION = btScalar(0.002);
			const btScalar MIN_AABB_HALF_DIMENSION = btScalar(0.001);
			if (aabbMax.getX() - aabbMin.getX() < MIN_AABB_DIMENSION)
			{
				aabbMax.setX(aabbMax.getX() + MIN_AABB_HALF_DIMENSION);
				aabbMin.setX(aabbMin.getX() - MIN_AABB_HALF_DIMENSION);
			}
			if (aabbMax.getY() - aabbMin.getY() < MIN_AABB_DIMENSION)
			{
				aabbMax.setY(aabbMax.getY() + MIN_AABB_HALF_DIMENSION);
				aabbMin.setY(aabbMin.getY() - MIN_AABB_HALF_DIMENSION);
			}
			if (aabbMax.getZ() - aabbMin.getZ() < MIN_AABB_DIMENSION)
			{
				aabbMax.setZ(aabbMax.getZ() + MIN_AABB_HALF_DIMENSION);
				aabbMin.setZ(aabbMin.getZ() - MIN_AABB_HALF_DIMENSION);
			}

			m_optimizedTree->quantize(&node.m_quantizedAabbMin[0],aabbMin,0);
			m_optimizedTree->quantize(&node.m_quantizedAabbMax[0],aabbMax,1);

			node.m_escapeIndexOrTriangleIndex = (partId<<(31-MAX_NUM_PARTS_IN_BITS)) | triangleIndex;

			m_triangleNodes.push_back(node);
		}
	};



	int numLeafNodes = 0;


	if (m_useQuantization)
	{

		//initialize quantization values
		setQuantizationValues(bvhAabbMin,bvhAabbMax);

		QuantizedNodeTriangleCallback callback(m_quantizedLeafNodes,this);


		triangles->InternalProcessAllTriangles(&callback,m_bvhAabbMin,m_bvhAabbMax);

		//now we have an array of leafnodes in m_leafNodes
		numLeafNodes = m_quantizedLeafNodes.size();


		m_quantizedContiguousNodes.resize(2*numLeafNodes);


	} else
	{
		NodeTriangleCallback callback(m_leafNodes);

		btVector3 aabbMin(btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT));
		btVector3 aabbMax(btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT));

		triangles->InternalProcessAllTriangles(&callback,aabbMin,aabbMax);

		//now we have an array of leafnodes in m_leafNodes
		numLeafNodes = m_leafNodes.size();

		m_contiguousNodes.resize(2*numLeafNodes);
	}

	m_curNodeIndex = 0;

	buildTree(0,numLeafNodes);

	///if the entire tree is small then subtree size, we need to create a header info for the tree
	if(m_useQuantization && !m_SubtreeHeaders.size())
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
		subtree.m_rootNodeIndex = 0;
		subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
	}

	//PCK: update the copy of the size
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

	//PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
	m_quantizedLeafNodes.clear();
	m_leafNodes.clear();
}




void btOptimizedBvh::refit(btStridingMeshInterface* meshInterface,const btVector3& aabbMin,const btVector3& aabbMax)
{
	if (m_useQuantization)
	{

		setQuantizationValues(aabbMin,aabbMax);

		updateBvhNodes(meshInterface,0,m_curNodeIndex,0);

		///now update all subtree headers

		int i;
		for (i=0;i<m_SubtreeHeaders.size();i++)
		{
			btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
			subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[subtree.m_rootNodeIndex]);
		}

	} else
	{

	}
}




void btOptimizedBvh::refitPartial(btStridingMeshInterface* meshInterface,const btVector3& aabbMin,const btVector3& aabbMax)
{
	//incrementally initialize quantization values
	btAssert(m_useQuantization);

	btAssert(aabbMin.getX() > m_bvhAabbMin.getX());
	btAssert(aabbMin.getY() > m_bvhAabbMin.getY());
	btAssert(aabbMin.getZ() > m_bvhAabbMin.getZ());

	btAssert(aabbMax.getX() < m_bvhAabbMax.getX());
	btAssert(aabbMax.getY() < m_bvhAabbMax.getY());
	btAssert(aabbMax.getZ() < m_bvhAabbMax.getZ());

	///we should update all quantization values, using updateBvhNodes(meshInterface);
	///but we only update chunks that overlap the given aabb

	unsigned short quantizedQueryAabbMin[3];
	unsigned short quantizedQueryAabbMax[3];

	quantize(&quantizedQueryAabbMin[0],aabbMin,0);
	quantize(&quantizedQueryAabbMax[0],aabbMax,1);

	int i;
	for (i=0;i<this->m_SubtreeHeaders.size();i++)
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];

		//PCK: unsigned instead of bool
		unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
		if (overlap != 0)
		{
			updateBvhNodes(meshInterface,subtree.m_rootNodeIndex,subtree.m_rootNodeIndex+subtree.m_subtreeSize,i);

			subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[subtree.m_rootNodeIndex]);
		}
	}

}

void btOptimizedBvh::updateBvhNodes(btStridingMeshInterface* meshInterface,int firstNode,int endNode,int index)
{
	(void)index;

	btAssert(m_useQuantization);

	int curNodeSubPart=-1;

	//get access info to trianglemesh data
	const unsigned char *vertexbase = 0;
	int numverts = 0;
	PHY_ScalarType type = PHY_INTEGER;
	int stride = 0;
	const unsigned char *indexbase = 0;
	int indexstride = 0;
	int numfaces = 0;
	PHY_ScalarType indicestype = PHY_INTEGER;

	btVector3 triangleVerts[3];
	btVector3 aabbMin,aabbMax;
	const btVector3& meshScaling = meshInterface->getScaling();

	int i;
	for (i=endNode-1;i>=firstNode;i--)
	{


		btQuantizedBvhNode& curNode = m_quantizedContiguousNodes[i];
		if (curNode.isLeafNode())
		{
			//recalc aabb from triangle data
			int nodeSubPart = curNode.getPartId();
			int nodeTriangleIndex = curNode.getTriangleIndex();
			if (nodeSubPart != curNodeSubPart)
			{
				if (curNodeSubPart >= 0)
					meshInterface->unLockReadOnlyVertexBase(curNodeSubPart);
				meshInterface->getLockedReadOnlyVertexIndexBase(&vertexbase,numverts, type,stride,&indexbase,indexstride,numfaces,indicestype,nodeSubPart);

				curNodeSubPart = nodeSubPart;
				btAssert(indicestype==PHY_INTEGER||indicestype==PHY_SHORT);
			}
			//triangles->getLockedReadOnlyVertexIndexBase(vertexBase,numVerts,

			unsigned int* gfxbase = (unsigned int*)(indexbase+nodeTriangleIndex*indexstride);


			for (int j=2;j>=0;j--)
			{

				int graphicsindex = indicestype==PHY_SHORT?((unsigned short*)gfxbase)[j]:gfxbase[j];
				if (type == PHY_FLOAT)
				{
					float* graphicsbase = (float*)(vertexbase+graphicsindex*stride);
					triangleVerts[j] = btVector3(
						graphicsbase[0]*meshScaling.getX(),
						graphicsbase[1]*meshScaling.getY(),
						graphicsbase[2]*meshScaling.getZ());
				}
				else
				{
					double* graphicsbase = (double*)(vertexbase+graphicsindex*stride);
					triangleVerts[j] = btVector3( btScalar(graphicsbase[0]*meshScaling.getX()), btScalar(graphicsbase[1]*meshScaling.getY()), btScalar(graphicsbase[2]*meshScaling.getZ()));
				}
			}



			aabbMin.setValue(btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT));
			aabbMax.setValue(btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT));
			aabbMin.setMin(triangleVerts[0]);
			aabbMax.setMax(triangleVerts[0]);
			aabbMin.setMin(triangleVerts[1]);
			aabbMax.setMax(triangleVerts[1]);
			aabbMin.setMin(triangleVerts[2]);
			aabbMax.setMax(triangleVerts[2]);

			quantize(&curNode.m_quantizedAabbMin[0],aabbMin,0);
			quantize(&curNode.m_quantizedAabbMax[0],aabbMax,1);

		} else
		{
			//combine aabb from both children

			btQuantizedBvhNode* leftChildNode = &m_quantizedContiguousNodes[i+1];

			btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? &m_quantizedContiguousNodes[i+2] :
				&m_quantizedContiguousNodes[i+1+leftChildNode->getEscapeIndex()];


			{
				for (int i=0;i<3;i++)
				{
					curNode.m_quantizedAabbMin[i] = leftChildNode->m_quantizedAabbMin[i];
					if (curNode.m_quantizedAabbMin[i]>rightChildNode->m_quantizedAabbMin[i])
						curNode.m_quantizedAabbMin[i]=rightChildNode->m_quantizedAabbMin[i];

					curNode.m_quantizedAabbMax[i] = leftChildNode->m_quantizedAabbMax[i];
					if (curNode.m_quantizedAabbMax[i] < rightChildNode->m_quantizedAabbMax[i])
						curNode.m_quantizedAabbMax[i] = rightChildNode->m_quantizedAabbMax[i];
				}
			}
		}

	}

	if (curNodeSubPart >= 0)
		meshInterface->unLockReadOnlyVertexBase(curNodeSubPart);


}

///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
btOptimizedBvh* btOptimizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{
	btQuantizedBvh* bvh = btQuantizedBvh::deSerializeInPlace(i_alignedDataBuffer,i_dataBufferSize,i_swapEndian);

	//we don't add additional data so just do a static upcast
	return static_cast<btOptimizedBvh*>(bvh);
}
65  opencl/gpu_narrowphase/host/btOptimizedBvh.h  Normal file
@@ -0,0 +1,65 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

///Contains contributions from Disney Studio's

#ifndef BT_OPTIMIZED_BVH_H
#define BT_OPTIMIZED_BVH_H

#include "btQuantizedBvh.h"

class btStridingMeshInterface;


///The btOptimizedBvh extends the btQuantizedBvh to create AABB tree for triangle meshes, through the btStridingMeshInterface.
ATTRIBUTE_ALIGNED16(class) btOptimizedBvh : public btQuantizedBvh
{

public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

protected:

public:

	btOptimizedBvh();

	virtual ~btOptimizedBvh();

	void build(btStridingMeshInterface* triangles,bool useQuantizedAabbCompression, const btVector3& bvhAabbMin, const btVector3& bvhAabbMax);

	void refit(btStridingMeshInterface* triangles,const btVector3& aabbMin,const btVector3& aabbMax);

	void refitPartial(btStridingMeshInterface* triangles,const btVector3& aabbMin, const btVector3& aabbMax);

	void updateBvhNodes(btStridingMeshInterface* meshInterface,int firstNode,int endNode,int index);

	/// Data buffer MUST be 16 byte aligned
	virtual bool serializeInPlace(void *o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const
	{
		return btQuantizedBvh::serialize(o_alignedDataBuffer,i_dataBufferSize,i_swapEndian);

	}

	///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
	static btOptimizedBvh *deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);


};


#endif //BT_OPTIMIZED_BVH_H

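A build/refit sketch for the class above; the btStridingMeshInterface and the mesh's world AABB are assumed to be provided by the caller, since constructing them is outside this commit, and the include paths are assumptions.

#include "opencl/gpu_narrowphase/host/btOptimizedBvh.h"
#include "btStridingMeshInterface.h"

// Hypothetical helper: build a quantized BVH over an existing mesh interface.
btOptimizedBvh* buildMeshBvh(btStridingMeshInterface* mesh,
	const btVector3& aabbMin, const btVector3& aabbMax)
{
	btOptimizedBvh* bvh = new btOptimizedBvh();
	bvh->build(mesh, /*useQuantizedAabbCompression=*/true, aabbMin, aabbMax);
	return bvh;
}

// After deforming the mesh, the quantized nodes can be updated in place with
//   bvh->refit(mesh, newAabbMin, newAabbMax);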
1302  opencl/gpu_narrowphase/host/btQuantizedBvh.cpp  Normal file
File diff suppressed because it is too large
582  opencl/gpu_narrowphase/host/btQuantizedBvh.h  Normal file
@@ -0,0 +1,582 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef BT_QUANTIZED_BVH_H
#define BT_QUANTIZED_BVH_H

class btSerializer;

//#define DEBUG_CHECK_DEQUANTIZATION 1
#ifdef DEBUG_CHECK_DEQUANTIZATION
#ifdef __SPU__
#define printf spu_printf
#endif //__SPU__

#include <stdio.h>
#include <stdlib.h>
#endif //DEBUG_CHECK_DEQUANTIZATION

#include "BulletCommon/btVector3.h"
#include "BulletCommon/btAlignedAllocator.h"

#ifdef BT_USE_DOUBLE_PRECISION
#define btQuantizedBvhData btQuantizedBvhDoubleData
#define btOptimizedBvhNodeData btOptimizedBvhNodeDoubleData
#define btQuantizedBvhDataName "btQuantizedBvhDoubleData"
#else
#define btQuantizedBvhData btQuantizedBvhFloatData
#define btOptimizedBvhNodeData btOptimizedBvhNodeFloatData
#define btQuantizedBvhDataName "btQuantizedBvhFloatData"
#endif



//http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrf__m128.asp


//Note: currently we have 16 bytes per quantized node
#define MAX_SUBTREE_SIZE_IN_BYTES 2048

// 10 gives the potential for 1024 parts, with at most 2^21 (2097152) (minus one
// actually) triangles each (since the sign bit is reserved
#define MAX_NUM_PARTS_IN_BITS 10

///btQuantizedBvhNode is a compressed aabb node, 16 bytes.
///Node can be used for leafnode or internal node. Leafnodes can point to 32-bit triangle index (non-negative range).
ATTRIBUTE_ALIGNED16 (struct) btQuantizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int m_quantizedAabbMin[3];
	unsigned short int m_quantizedAabbMax[3];
	//4 bytes
	int m_escapeIndexOrTriangleIndex;

	bool isLeafNode() const
	{
		//skipindex is negative (internal node), triangleindex >=0 (leafnode)
		return (m_escapeIndexOrTriangleIndex >= 0);
	}
	int getEscapeIndex() const
	{
		btAssert(!isLeafNode());
		return -m_escapeIndexOrTriangleIndex;
	}
	int getTriangleIndex() const
	{
		btAssert(isLeafNode());
		unsigned int x=0;
		unsigned int y = (~(x&0))<<(31-MAX_NUM_PARTS_IN_BITS);
		// Get only the lower bits where the triangle index is stored
		return (m_escapeIndexOrTriangleIndex&~(y));
	}
	int getPartId() const
	{
		btAssert(isLeafNode());
		// Get only the highest bits where the part index is stored
		return (m_escapeIndexOrTriangleIndex>>(31-MAX_NUM_PARTS_IN_BITS));
	}
}
;

/// btOptimizedBvhNode contains both internal and leaf node information.
/// Total node size is 44 bytes / node. You can use the compressed version of 16 bytes.
ATTRIBUTE_ALIGNED16 (struct) btOptimizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//32 bytes
	btVector3 m_aabbMinOrg;
	btVector3 m_aabbMaxOrg;

	//4
	int m_escapeIndex;

	//8
	//for child nodes
	int m_subPart;
	int m_triangleIndex;

	//pad the size to 64 bytes
	char m_padding[20];
};


///btBvhSubtreeInfo provides info to gather a subtree of limited size
ATTRIBUTE_ALIGNED16(class) btBvhSubtreeInfo
{
public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int m_quantizedAabbMin[3];
	unsigned short int m_quantizedAabbMax[3];
	//4 bytes, points to the root of the subtree
	int m_rootNodeIndex;
	//4 bytes
	int m_subtreeSize;
	int m_padding[3];

	btBvhSubtreeInfo()
	{
		//memset(&m_padding[0], 0, sizeof(m_padding));
	}


	void setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode)
	{
		m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0];
		m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1];
		m_quantizedAabbMin[2] = quantizedNode.m_quantizedAabbMin[2];
		m_quantizedAabbMax[0] = quantizedNode.m_quantizedAabbMax[0];
		m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1];
		m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2];
	}
}
;


class btNodeOverlapCallback
{
public:
	virtual ~btNodeOverlapCallback() {};

	virtual void processNode(int subPart, int triangleIndex) = 0;
};

#include "BulletCommon/btAlignedAllocator.h"
#include "BulletCommon/btAlignedObjectArray.h"



///for code readability:
typedef btAlignedObjectArray<btOptimizedBvhNode> NodeArray;
typedef btAlignedObjectArray<btQuantizedBvhNode> QuantizedNodeArray;
typedef btAlignedObjectArray<btBvhSubtreeInfo> BvhSubtreeInfoArray;


///The btQuantizedBvh class stores an AABB tree that can be quickly traversed on CPU and Cell SPU.
///It is used by the btBvhTriangleMeshShape as midphase, and by the btMultiSapBroadphase.
///It is recommended to use quantization for better performance and lower memory requirements.
ATTRIBUTE_ALIGNED16(class) btQuantizedBvh
{
public:
	enum btTraversalMode
	{
		TRAVERSAL_STACKLESS = 0,
		TRAVERSAL_STACKLESS_CACHE_FRIENDLY,
		TRAVERSAL_RECURSIVE
	};




	btVector3 m_bvhAabbMin;
	btVector3 m_bvhAabbMax;
	btVector3 m_bvhQuantization;

protected:
	int m_bulletVersion; //for serialization versioning. It could also be used to detect endianess.

	int m_curNodeIndex;
	//quantization data
	bool m_useQuantization;



	NodeArray m_leafNodes;
	NodeArray m_contiguousNodes;
	QuantizedNodeArray m_quantizedLeafNodes;
	QuantizedNodeArray m_quantizedContiguousNodes;

	btTraversalMode m_traversalMode;
	BvhSubtreeInfoArray m_SubtreeHeaders;

	//This is only used for serialization so we don't have to add serialization directly to btAlignedObjectArray
	mutable int m_subtreeHeaderCount;





	///two versions, one for quantized and normal nodes. This allows code-reuse while maintaining readability (no template/macro!)
	///this might be refactored into a virtual, it is usually not calculated at run-time
	void setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] ,aabbMin,0);
		} else
		{
			m_contiguousNodes[nodeIndex].m_aabbMinOrg = aabbMin;

		}
	}
	void setInternalNodeAabbMax(int nodeIndex,const btVector3& aabbMax)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0],aabbMax,1);
		} else
		{
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg = aabbMax;
		}
	}

	btVector3 getAabbMin(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMin[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMinOrg;

	}
	btVector3 getAabbMax(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMax[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMaxOrg;

	}


	void setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex)
	{
		if (m_useQuantization)
		{
			m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = -escapeIndex;
		}
		else
		{
			m_contiguousNodes[nodeIndex].m_escapeIndex = escapeIndex;
		}

	}

	void mergeInternalNodeAabb(int nodeIndex,const btVector3& newAabbMin,const btVector3& newAabbMax)
	{
		if (m_useQuantization)
		{
			unsigned short int quantizedAabbMin[3];
			unsigned short int quantizedAabbMax[3];
			quantize(quantizedAabbMin,newAabbMin,0);
			quantize(quantizedAabbMax,newAabbMax,1);
			for (int i=0;i<3;i++)
			{
				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] > quantizedAabbMin[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] = quantizedAabbMin[i];

				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] < quantizedAabbMax[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] = quantizedAabbMax[i];

			}
		} else
		{
			//non-quantized
			m_contiguousNodes[nodeIndex].m_aabbMinOrg.setMin(newAabbMin);
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax);
		}
	}

	void swapLeafNodes(int firstIndex,int secondIndex);

	void assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex);

protected:



	void buildTree (int startIndex,int endIndex);

	int calcSplittingAxis(int startIndex,int endIndex);

	int sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis);

	void walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;

	void walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const;
	void walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const;
	void walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const;

	///tree traversal designed for small-memory processors like PS3 SPU
	void walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA,const btQuantizedBvhNode* treeNodeB,btNodeOverlapCallback* nodeCallback) const;




	void updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex);

public:

	BT_DECLARE_ALIGNED_ALLOCATOR();

	btQuantizedBvh();

	virtual ~btQuantizedBvh();


	///***************************************** expert/internal use only *************************
	void setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin=btScalar(1.0));
	QuantizedNodeArray& getLeafNodeArray() { return m_quantizedLeafNodes; }
	///buildInternal is expert use only: assumes that setQuantizationValues and LeafNodeArray are initialized
	void buildInternal();
	///***************************************** expert/internal use only *************************

	void reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;
	void reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const;
	void reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const;

	SIMD_FORCE_INLINE void quantize(unsigned short* out, const btVector3& point,int isMax) const
	{

		btAssert(m_useQuantization);

		btAssert(point.getX() <= m_bvhAabbMax.getX());
		btAssert(point.getY() <= m_bvhAabbMax.getY());
		btAssert(point.getZ() <= m_bvhAabbMax.getZ());

		btAssert(point.getX() >= m_bvhAabbMin.getX());
		btAssert(point.getY() >= m_bvhAabbMin.getY());
		btAssert(point.getZ() >= m_bvhAabbMin.getZ());

		btVector3 v = (point - m_bvhAabbMin) * m_bvhQuantization;
		///Make sure rounding is done in a way that unQuantize(quantizeWithClamp(...)) is conservative
		///end-points always set the first bit, so that they are sorted properly (so that neighbouring AABBs overlap properly)
		///@todo: double-check this
		if (isMax)
		{
			out[0] = (unsigned short) (((unsigned short)(v.getX()+btScalar(1.)) | 1));
			out[1] = (unsigned short) (((unsigned short)(v.getY()+btScalar(1.)) | 1));
			out[2] = (unsigned short) (((unsigned short)(v.getZ()+btScalar(1.)) | 1));
		} else
		{
			out[0] = (unsigned short) (((unsigned short)(v.getX()) & 0xfffe));
			out[1] = (unsigned short) (((unsigned short)(v.getY()) & 0xfffe));
			out[2] = (unsigned short) (((unsigned short)(v.getZ()) & 0xfffe));
		}


#ifdef DEBUG_CHECK_DEQUANTIZATION
		btVector3 newPoint = unQuantize(out);
		if (isMax)
		{
			if (newPoint.getX() < point.getX())
			{
				printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
			}
			if (newPoint.getY() < point.getY())
			{
				printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
			}
			if (newPoint.getZ() < point.getZ())
			{

				printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
			}
		} else
		{
			if (newPoint.getX() > point.getX())
			{
				printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
			}
			if (newPoint.getY() > point.getY())
			{
				printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
			}
			if (newPoint.getZ() > point.getZ())
			{
				printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
			}
		}
#endif //DEBUG_CHECK_DEQUANTIZATION

	}


	SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const btVector3& point2,int isMax) const
	{
|
||||
btAssert(m_useQuantization);
|
||||
|
||||
btVector3 clampedPoint(point2);
|
||||
clampedPoint.setMax(m_bvhAabbMin);
|
||||
clampedPoint.setMin(m_bvhAabbMax);
|
||||
|
||||
quantize(out,clampedPoint,isMax);
|
||||
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btVector3 unQuantize(const unsigned short* vecIn) const
|
||||
{
|
||||
btVector3 vecOut;
|
||||
vecOut.setValue(
|
||||
(btScalar)(vecIn[0]) / (m_bvhQuantization.getX()),
|
||||
(btScalar)(vecIn[1]) / (m_bvhQuantization.getY()),
|
||||
(btScalar)(vecIn[2]) / (m_bvhQuantization.getZ()));
|
||||
vecOut += m_bvhAabbMin;
|
||||
return vecOut;
|
||||
}
|
||||
|
||||
///setTraversalMode let's you choose between stackless, recursive or stackless cache friendly tree traversal. Note this is only implemented for quantized trees.
|
||||
void setTraversalMode(btTraversalMode traversalMode)
|
||||
{
|
||||
m_traversalMode = traversalMode;
|
||||
}
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE QuantizedNodeArray& getQuantizedNodeArray()
|
||||
{
|
||||
return m_quantizedContiguousNodes;
|
||||
}
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE BvhSubtreeInfoArray& getSubtreeInfoArray()
|
||||
{
|
||||
return m_SubtreeHeaders;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////
|
||||
|
||||
/////Calculate space needed to store BVH for serialization
|
||||
unsigned calculateSerializeBufferSize() const;
|
||||
|
||||
/// Data buffer MUST be 16 byte aligned
|
||||
virtual bool serialize(void *o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const;
|
||||
|
||||
///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
|
||||
static btQuantizedBvh *deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);
|
||||
|
||||
static unsigned int getAlignmentSerializationPadding();
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
virtual int calculateSerializeBufferSizeNew() const;
|
||||
|
||||
///fills the dataBuffer and returns the struct name (and 0 on failure)
|
||||
virtual const char* serialize(void* dataBuffer, btSerializer* serializer) const;
|
||||
|
||||
virtual void deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData);
|
||||
|
||||
virtual void deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData);
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////
|
||||
|
||||
SIMD_FORCE_INLINE bool isQuantized()
|
||||
{
|
||||
return m_useQuantization;
|
||||
}
|
||||
|
||||
private:
|
||||
// Special "copy" constructor that allows for in-place deserialization
|
||||
// Prevents btVector3's default constructor from being called, but doesn't inialize much else
|
||||
// ownsMemory should most likely be false if deserializing, and if you are not, don't call this (it also changes the function signature, which we need)
|
||||
btQuantizedBvh(btQuantizedBvh &other, bool ownsMemory);
|
||||
|
||||
}
|
||||
;
|
||||
|
||||
|
||||
struct btBvhSubtreeInfoData
|
||||
{
|
||||
int m_rootNodeIndex;
|
||||
int m_subtreeSize;
|
||||
unsigned short m_quantizedAabbMin[3];
|
||||
unsigned short m_quantizedAabbMax[3];
|
||||
};
|
||||
|
||||
struct btOptimizedBvhNodeFloatData
|
||||
{
|
||||
btVector3FloatData m_aabbMinOrg;
|
||||
btVector3FloatData m_aabbMaxOrg;
|
||||
int m_escapeIndex;
|
||||
int m_subPart;
|
||||
int m_triangleIndex;
|
||||
char m_pad[4];
|
||||
};
|
||||
|
||||
struct btOptimizedBvhNodeDoubleData
|
||||
{
|
||||
btVector3DoubleData m_aabbMinOrg;
|
||||
btVector3DoubleData m_aabbMaxOrg;
|
||||
int m_escapeIndex;
|
||||
int m_subPart;
|
||||
int m_triangleIndex;
|
||||
char m_pad[4];
|
||||
};
|
||||
|
||||
|
||||
struct btQuantizedBvhNodeData
|
||||
{
|
||||
unsigned short m_quantizedAabbMin[3];
|
||||
unsigned short m_quantizedAabbMax[3];
|
||||
int m_escapeIndexOrTriangleIndex;
|
||||
};
|
||||
|
||||
struct btQuantizedBvhFloatData
|
||||
{
|
||||
btVector3FloatData m_bvhAabbMin;
|
||||
btVector3FloatData m_bvhAabbMax;
|
||||
btVector3FloatData m_bvhQuantization;
|
||||
int m_curNodeIndex;
|
||||
int m_useQuantization;
|
||||
int m_numContiguousLeafNodes;
|
||||
int m_numQuantizedContiguousNodes;
|
||||
btOptimizedBvhNodeFloatData *m_contiguousNodesPtr;
|
||||
btQuantizedBvhNodeData *m_quantizedContiguousNodesPtr;
|
||||
btBvhSubtreeInfoData *m_subTreeInfoPtr;
|
||||
int m_traversalMode;
|
||||
int m_numSubtreeHeaders;
|
||||
|
||||
};
|
||||
|
||||
struct btQuantizedBvhDoubleData
|
||||
{
|
||||
btVector3DoubleData m_bvhAabbMin;
|
||||
btVector3DoubleData m_bvhAabbMax;
|
||||
btVector3DoubleData m_bvhQuantization;
|
||||
int m_curNodeIndex;
|
||||
int m_useQuantization;
|
||||
int m_numContiguousLeafNodes;
|
||||
int m_numQuantizedContiguousNodes;
|
||||
btOptimizedBvhNodeDoubleData *m_contiguousNodesPtr;
|
||||
btQuantizedBvhNodeData *m_quantizedContiguousNodesPtr;
|
||||
|
||||
int m_traversalMode;
|
||||
int m_numSubtreeHeaders;
|
||||
btBvhSubtreeInfoData *m_subTreeInfoPtr;
|
||||
};
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE int btQuantizedBvh::calculateSerializeBufferSizeNew() const
|
||||
{
|
||||
return sizeof(btQuantizedBvhData);
|
||||
}
|
||||
|
||||
|
||||
|
||||
#endif //BT_QUANTIZED_BVH_H
|
||||
35
opencl/gpu_narrowphase/host/btRigidBodyCL.h
Normal file
35
opencl/gpu_narrowphase/host/btRigidBodyCL.h
Normal file
@@ -0,0 +1,35 @@
|
||||
#ifndef BT_RIGID_BODY_CL
|
||||
#define BT_RIGID_BODY_CL
|
||||
|
||||
#include "BulletCommon/btScalar.h"
|
||||
#include "BulletCommon/btMatrix3x3.h"
|
||||
|
||||
ATTRIBUTE_ALIGNED16(struct) btRigidBodyCL
|
||||
{
|
||||
BT_DECLARE_ALIGNED_ALLOCATOR();
|
||||
|
||||
btVector3 m_pos;
|
||||
btQuaternion m_quat;
|
||||
btVector3 m_linVel;
|
||||
btVector3 m_angVel;
|
||||
|
||||
int m_collidableIdx;
|
||||
float m_invMass;
|
||||
float m_restituitionCoeff;
|
||||
float m_frictionCoeff;
|
||||
|
||||
float getInvMass() const
|
||||
{
|
||||
return m_invMass;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct btInertiaCL
|
||||
{
|
||||
btMatrix3x3 m_invInertiaWorld;
|
||||
btMatrix3x3 m_initInvInertia;
|
||||
};
|
||||
|
||||
|
||||
#endif//BT_RIGID_BODY_CL
|
||||
214
opencl/gpu_narrowphase/host/btStridingMeshInterface.cpp
Normal file
214
opencl/gpu_narrowphase/host/btStridingMeshInterface.cpp
Normal file
@@ -0,0 +1,214 @@
|
||||
/*
|
||||
Bullet Continuous Collision Detection and Physics Library
|
||||
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
|
||||
|
||||
This software is provided 'as-is', without any express or implied warranty.
|
||||
In no event will the authors be held liable for any damages arising from the use of this software.
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it freely,
|
||||
subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
*/
|
||||
|
||||
#include "btStridingMeshInterface.h"
|
||||
|
||||
|
||||
btStridingMeshInterface::~btStridingMeshInterface()
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
|
||||
void btStridingMeshInterface::InternalProcessAllTriangles(btInternalTriangleIndexCallback* callback,const btVector3& aabbMin,const btVector3& aabbMax) const
|
||||
{
|
||||
(void)aabbMin;
|
||||
(void)aabbMax;
|
||||
int numtotalphysicsverts = 0;
|
||||
int part,graphicssubparts = getNumSubParts();
|
||||
const unsigned char * vertexbase;
|
||||
const unsigned char * indexbase;
|
||||
int indexstride;
|
||||
PHY_ScalarType type;
|
||||
PHY_ScalarType gfxindextype;
|
||||
int stride,numverts,numtriangles;
|
||||
int gfxindex;
|
||||
btVector3 triangle[3];
|
||||
|
||||
btVector3 meshScaling = getScaling();
|
||||
|
||||
///if the number of parts is big, the performance might drop due to the innerloop switch on indextype
|
||||
for (part=0;part<graphicssubparts ;part++)
|
||||
{
|
||||
getLockedReadOnlyVertexIndexBase(&vertexbase,numverts,type,stride,&indexbase,indexstride,numtriangles,gfxindextype,part);
|
||||
numtotalphysicsverts+=numtriangles*3; //upper bound
|
||||
|
||||
///unlike that developers want to pass in double-precision meshes in single-precision Bullet build
|
||||
///so disable this feature by default
|
||||
///see patch http://code.google.com/p/bullet/issues/detail?id=213
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case PHY_FLOAT:
|
||||
{
|
||||
|
||||
float* graphicsbase;
|
||||
|
||||
switch (gfxindextype)
|
||||
{
|
||||
case PHY_INTEGER:
|
||||
{
|
||||
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
|
||||
{
|
||||
unsigned int* tri_indices= (unsigned int*)(indexbase+gfxindex*indexstride);
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[0]*stride);
|
||||
triangle[0].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(),graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[1]*stride);
|
||||
triangle[1].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[2]*stride);
|
||||
triangle[2].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
|
||||
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PHY_SHORT:
|
||||
{
|
||||
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
|
||||
{
|
||||
unsigned short int* tri_indices= (unsigned short int*)(indexbase+gfxindex*indexstride);
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[0]*stride);
|
||||
triangle[0].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(),graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[1]*stride);
|
||||
triangle[1].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[2]*stride);
|
||||
triangle[2].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
|
||||
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PHY_UCHAR:
|
||||
{
|
||||
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
|
||||
{
|
||||
unsigned char* tri_indices= (unsigned char*)(indexbase+gfxindex*indexstride);
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[0]*stride);
|
||||
triangle[0].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(),graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[1]*stride);
|
||||
triangle[1].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (float*)(vertexbase+tri_indices[2]*stride);
|
||||
triangle[2].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
|
||||
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
btAssert((gfxindextype == PHY_INTEGER) || (gfxindextype == PHY_SHORT));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PHY_DOUBLE:
|
||||
{
|
||||
double* graphicsbase;
|
||||
|
||||
switch (gfxindextype)
|
||||
{
|
||||
case PHY_INTEGER:
|
||||
{
|
||||
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
|
||||
{
|
||||
unsigned int* tri_indices= (unsigned int*)(indexbase+gfxindex*indexstride);
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[0]*stride);
|
||||
triangle[0].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(),(btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[1]*stride);
|
||||
triangle[1].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(), (btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[2]*stride);
|
||||
triangle[2].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(), (btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PHY_SHORT:
|
||||
{
|
||||
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
|
||||
{
|
||||
unsigned short int* tri_indices= (unsigned short int*)(indexbase+gfxindex*indexstride);
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[0]*stride);
|
||||
triangle[0].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(),(btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[1]*stride);
|
||||
triangle[1].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(), (btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[2]*stride);
|
||||
triangle[2].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(), (btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PHY_UCHAR:
|
||||
{
|
||||
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
|
||||
{
|
||||
unsigned char* tri_indices= (unsigned char*)(indexbase+gfxindex*indexstride);
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[0]*stride);
|
||||
triangle[0].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(),(btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[1]*stride);
|
||||
triangle[1].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(), (btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
graphicsbase = (double*)(vertexbase+tri_indices[2]*stride);
|
||||
triangle[2].setValue((btScalar)graphicsbase[0]*meshScaling.getX(),(btScalar)graphicsbase[1]*meshScaling.getY(), (btScalar)graphicsbase[2]*meshScaling.getZ());
|
||||
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
btAssert((gfxindextype == PHY_INTEGER) || (gfxindextype == PHY_SHORT));
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
btAssert((type == PHY_FLOAT) || (type == PHY_DOUBLE));
|
||||
}
|
||||
|
||||
unLockReadOnlyVertexBase(part);
|
||||
}
|
||||
}
|
||||
|
||||
void btStridingMeshInterface::calculateAabbBruteForce(btVector3& aabbMin,btVector3& aabbMax)
|
||||
{
|
||||
|
||||
struct AabbCalculationCallback : public btInternalTriangleIndexCallback
|
||||
{
|
||||
btVector3 m_aabbMin;
|
||||
btVector3 m_aabbMax;
|
||||
|
||||
AabbCalculationCallback()
|
||||
{
|
||||
m_aabbMin.setValue(btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT));
|
||||
m_aabbMax.setValue(btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT));
|
||||
}
|
||||
|
||||
virtual void internalProcessTriangleIndex(btVector3* triangle,int partId,int triangleIndex)
|
||||
{
|
||||
(void)partId;
|
||||
(void)triangleIndex;
|
||||
|
||||
m_aabbMin.setMin(triangle[0]);
|
||||
m_aabbMax.setMax(triangle[0]);
|
||||
m_aabbMin.setMin(triangle[1]);
|
||||
m_aabbMax.setMax(triangle[1]);
|
||||
m_aabbMin.setMin(triangle[2]);
|
||||
m_aabbMax.setMax(triangle[2]);
|
||||
}
|
||||
};
|
||||
|
||||
//first calculate the total aabb for all triangles
|
||||
AabbCalculationCallback aabbCallback;
|
||||
aabbMin.setValue(btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT),btScalar(-BT_LARGE_FLOAT));
|
||||
aabbMax.setValue(btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT),btScalar(BT_LARGE_FLOAT));
|
||||
InternalProcessAllTriangles(&aabbCallback,aabbMin,aabbMax);
|
||||
|
||||
aabbMin = aabbCallback.m_aabbMin;
|
||||
aabbMax = aabbCallback.m_aabbMax;
|
||||
}
|
||||
|
||||
|
||||
167
opencl/gpu_narrowphase/host/btStridingMeshInterface.h
Normal file
167
opencl/gpu_narrowphase/host/btStridingMeshInterface.h
Normal file
@@ -0,0 +1,167 @@
|
||||
/*
|
||||
Bullet Continuous Collision Detection and Physics Library
|
||||
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
|
||||
|
||||
This software is provided 'as-is', without any express or implied warranty.
|
||||
In no event will the authors be held liable for any damages arising from the use of this software.
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it freely,
|
||||
subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
*/
|
||||
|
||||
#ifndef BT_STRIDING_MESHINTERFACE_H
|
||||
#define BT_STRIDING_MESHINTERFACE_H
|
||||
|
||||
#include "BulletCommon/btVector3.h"
|
||||
#include "btTriangleCallback.h"
|
||||
//#include "btConcaveShape.h"
|
||||
|
||||
|
||||
enum PHY_ScalarType {
|
||||
PHY_FLOAT, PHY_DOUBLE, PHY_INTEGER, PHY_SHORT,
|
||||
PHY_FIXEDPOINT88, PHY_UCHAR
|
||||
};
|
||||
|
||||
|
||||
/// The btStridingMeshInterface is the interface class for high performance generic access to triangle meshes, used in combination with btBvhTriangleMeshShape and some other collision shapes.
|
||||
/// Using index striding of 3*sizeof(integer) it can use triangle arrays, using index striding of 1*sizeof(integer) it can handle triangle strips.
|
||||
/// It allows for sharing graphics and collision meshes. Also it provides locking/unlocking of graphics meshes that are in gpu memory.
|
||||
ATTRIBUTE_ALIGNED16(class ) btStridingMeshInterface
|
||||
{
|
||||
protected:
|
||||
|
||||
btVector3 m_scaling;
|
||||
|
||||
public:
|
||||
BT_DECLARE_ALIGNED_ALLOCATOR();
|
||||
|
||||
btStridingMeshInterface() :m_scaling(btScalar(1.),btScalar(1.),btScalar(1.))
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
virtual ~btStridingMeshInterface();
|
||||
|
||||
|
||||
|
||||
virtual void InternalProcessAllTriangles(btInternalTriangleIndexCallback* callback,const btVector3& aabbMin,const btVector3& aabbMax) const;
|
||||
|
||||
///brute force method to calculate aabb
|
||||
void calculateAabbBruteForce(btVector3& aabbMin,btVector3& aabbMax);
|
||||
|
||||
/// get read and write access to a subpart of a triangle mesh
|
||||
/// this subpart has a continuous array of vertices and indices
|
||||
/// in this way the mesh can be handled as chunks of memory with striding
|
||||
/// very similar to OpenGL vertexarray support
|
||||
/// make a call to unLockVertexBase when the read and write access is finished
|
||||
virtual void getLockedVertexIndexBase(unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& stride,unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0)=0;
|
||||
|
||||
virtual void getLockedReadOnlyVertexIndexBase(const unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& stride,const unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0) const=0;
|
||||
|
||||
/// unLockVertexBase finishes the access to a subpart of the triangle mesh
|
||||
/// make a call to unLockVertexBase when the read and write access (using getLockedVertexIndexBase) is finished
|
||||
virtual void unLockVertexBase(int subpart)=0;
|
||||
|
||||
virtual void unLockReadOnlyVertexBase(int subpart) const=0;
|
||||
|
||||
|
||||
/// getNumSubParts returns the number of seperate subparts
|
||||
/// each subpart has a continuous array of vertices and indices
|
||||
virtual int getNumSubParts() const=0;
|
||||
|
||||
virtual void preallocateVertices(int numverts)=0;
|
||||
virtual void preallocateIndices(int numindices)=0;
|
||||
|
||||
virtual bool hasPremadeAabb() const { return false; }
|
||||
virtual void setPremadeAabb(const btVector3& aabbMin, const btVector3& aabbMax ) const
|
||||
{
|
||||
(void) aabbMin;
|
||||
(void) aabbMax;
|
||||
}
|
||||
virtual void getPremadeAabb(btVector3* aabbMin, btVector3* aabbMax ) const
|
||||
{
|
||||
(void) aabbMin;
|
||||
(void) aabbMax;
|
||||
}
|
||||
|
||||
const btVector3& getScaling() const {
|
||||
return m_scaling;
|
||||
}
|
||||
void setScaling(const btVector3& scaling)
|
||||
{
|
||||
m_scaling = scaling;
|
||||
}
|
||||
|
||||
virtual int calculateSerializeBufferSize() const;
|
||||
|
||||
///fills the dataBuffer and returns the struct name (and 0 on failure)
|
||||
//virtual const char* serialize(void* dataBuffer, btSerializer* serializer) const;
|
||||
|
||||
|
||||
};
|
||||
|
||||
struct btIntIndexData
|
||||
{
|
||||
int m_value;
|
||||
};
|
||||
|
||||
struct btShortIntIndexData
|
||||
{
|
||||
short m_value;
|
||||
char m_pad[2];
|
||||
};
|
||||
|
||||
struct btShortIntIndexTripletData
|
||||
{
|
||||
short m_values[3];
|
||||
char m_pad[2];
|
||||
};
|
||||
|
||||
struct btCharIndexTripletData
|
||||
{
|
||||
unsigned char m_values[3];
|
||||
char m_pad;
|
||||
};
|
||||
|
||||
|
||||
///do not change those serialization structures, it requires an updated sBulletDNAstr/sBulletDNAstr64
|
||||
struct btMeshPartData
|
||||
{
|
||||
btVector3FloatData *m_vertices3f;
|
||||
btVector3DoubleData *m_vertices3d;
|
||||
|
||||
btIntIndexData *m_indices32;
|
||||
btShortIntIndexTripletData *m_3indices16;
|
||||
btCharIndexTripletData *m_3indices8;
|
||||
|
||||
btShortIntIndexData *m_indices16;//backwards compatibility
|
||||
|
||||
int m_numTriangles;//length of m_indices = m_numTriangles
|
||||
int m_numVertices;
|
||||
};
|
||||
|
||||
|
||||
///do not change those serialization structures, it requires an updated sBulletDNAstr/sBulletDNAstr64
|
||||
struct btStridingMeshInterfaceData
|
||||
{
|
||||
btMeshPartData *m_meshPartsPtr;
|
||||
btVector3FloatData m_scaling;
|
||||
int m_numMeshParts;
|
||||
char m_padding[4];
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE int btStridingMeshInterface::calculateSerializeBufferSize() const
|
||||
{
|
||||
return sizeof(btStridingMeshInterfaceData);
|
||||
}
|
||||
|
||||
|
||||
|
||||
#endif //BT_STRIDING_MESHINTERFACE_H
|
||||
28
opencl/gpu_narrowphase/host/btTriangleCallback.cpp
Normal file
28
opencl/gpu_narrowphase/host/btTriangleCallback.cpp
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
Bullet Continuous Collision Detection and Physics Library
|
||||
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
|
||||
|
||||
This software is provided 'as-is', without any express or implied warranty.
|
||||
In no event will the authors be held liable for any damages arising from the use of this software.
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it freely,
|
||||
subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
*/
|
||||
|
||||
#include "btTriangleCallback.h"
|
||||
|
||||
btTriangleCallback::~btTriangleCallback()
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
|
||||
btInternalTriangleIndexCallback::~btInternalTriangleIndexCallback()
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
42
opencl/gpu_narrowphase/host/btTriangleCallback.h
Normal file
42
opencl/gpu_narrowphase/host/btTriangleCallback.h
Normal file
@@ -0,0 +1,42 @@
|
||||
/*
|
||||
Bullet Continuous Collision Detection and Physics Library
|
||||
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
|
||||
|
||||
This software is provided 'as-is', without any express or implied warranty.
|
||||
In no event will the authors be held liable for any damages arising from the use of this software.
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it freely,
|
||||
subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
*/
|
||||
|
||||
#ifndef BT_TRIANGLE_CALLBACK_H
|
||||
#define BT_TRIANGLE_CALLBACK_H
|
||||
|
||||
#include "BulletCommon/btVector3.h"
|
||||
|
||||
|
||||
///The btTriangleCallback provides a callback for each overlapping triangle when calling processAllTriangles.
|
||||
///This callback is called by processAllTriangles for all btConcaveShape derived class, such as btBvhTriangleMeshShape, btStaticPlaneShape and btHeightfieldTerrainShape.
|
||||
class btTriangleCallback
|
||||
{
|
||||
public:
|
||||
|
||||
virtual ~btTriangleCallback();
|
||||
virtual void processTriangle(btVector3* triangle, int partId, int triangleIndex) = 0;
|
||||
};
|
||||
|
||||
class btInternalTriangleIndexCallback
|
||||
{
|
||||
public:
|
||||
|
||||
virtual ~btInternalTriangleIndexCallback();
|
||||
virtual void internalProcessTriangleIndex(btVector3* triangle,int partId,int triangleIndex) = 0;
|
||||
};
|
||||
|
||||
|
||||
|
||||
#endif //BT_TRIANGLE_CALLBACK_H
|
||||
95
opencl/gpu_narrowphase/host/btTriangleIndexVertexArray.cpp
Normal file
95
opencl/gpu_narrowphase/host/btTriangleIndexVertexArray.cpp
Normal file
@@ -0,0 +1,95 @@
|
||||
/*
|
||||
Bullet Continuous Collision Detection and Physics Library
|
||||
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
|
||||
|
||||
This software is provided 'as-is', without any express or implied warranty.
|
||||
In no event will the authors be held liable for any damages arising from the use of this software.
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it freely,
|
||||
subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
*/
|
||||
|
||||
#include "btTriangleIndexVertexArray.h"
|
||||
|
||||
btTriangleIndexVertexArray::btTriangleIndexVertexArray(int numTriangles,int* triangleIndexBase,int triangleIndexStride,int numVertices,btScalar* vertexBase,int vertexStride)
|
||||
: m_hasAabb(0)
|
||||
{
|
||||
btIndexedMesh mesh;
|
||||
|
||||
mesh.m_numTriangles = numTriangles;
|
||||
mesh.m_triangleIndexBase = (const unsigned char *)triangleIndexBase;
|
||||
mesh.m_triangleIndexStride = triangleIndexStride;
|
||||
mesh.m_numVertices = numVertices;
|
||||
mesh.m_vertexBase = (const unsigned char *)vertexBase;
|
||||
mesh.m_vertexStride = vertexStride;
|
||||
|
||||
addIndexedMesh(mesh);
|
||||
|
||||
}
|
||||
|
||||
btTriangleIndexVertexArray::~btTriangleIndexVertexArray()
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
void btTriangleIndexVertexArray::getLockedVertexIndexBase(unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart)
|
||||
{
|
||||
btAssert(subpart< getNumSubParts() );
|
||||
|
||||
btIndexedMesh& mesh = m_indexedMeshes[subpart];
|
||||
|
||||
numverts = mesh.m_numVertices;
|
||||
(*vertexbase) = (unsigned char *) mesh.m_vertexBase;
|
||||
|
||||
type = mesh.m_vertexType;
|
||||
|
||||
vertexStride = mesh.m_vertexStride;
|
||||
|
||||
numfaces = mesh.m_numTriangles;
|
||||
|
||||
(*indexbase) = (unsigned char *)mesh.m_triangleIndexBase;
|
||||
indexstride = mesh.m_triangleIndexStride;
|
||||
indicestype = mesh.m_indexType;
|
||||
}
|
||||
|
||||
void btTriangleIndexVertexArray::getLockedReadOnlyVertexIndexBase(const unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,const unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart) const
|
||||
{
|
||||
const btIndexedMesh& mesh = m_indexedMeshes[subpart];
|
||||
|
||||
numverts = mesh.m_numVertices;
|
||||
(*vertexbase) = (const unsigned char *)mesh.m_vertexBase;
|
||||
|
||||
type = mesh.m_vertexType;
|
||||
|
||||
vertexStride = mesh.m_vertexStride;
|
||||
|
||||
numfaces = mesh.m_numTriangles;
|
||||
(*indexbase) = (const unsigned char *)mesh.m_triangleIndexBase;
|
||||
indexstride = mesh.m_triangleIndexStride;
|
||||
indicestype = mesh.m_indexType;
|
||||
}
|
||||
|
||||
bool btTriangleIndexVertexArray::hasPremadeAabb() const
|
||||
{
|
||||
return (m_hasAabb == 1);
|
||||
}
|
||||
|
||||
|
||||
void btTriangleIndexVertexArray::setPremadeAabb(const btVector3& aabbMin, const btVector3& aabbMax ) const
|
||||
{
|
||||
m_aabbMin = aabbMin;
|
||||
m_aabbMax = aabbMax;
|
||||
m_hasAabb = 1; // this is intentionally an int see notes in header
|
||||
}
|
||||
|
||||
void btTriangleIndexVertexArray::getPremadeAabb(btVector3* aabbMin, btVector3* aabbMax ) const
|
||||
{
|
||||
*aabbMin = m_aabbMin;
|
||||
*aabbMax = m_aabbMax;
|
||||
}
|
||||
|
||||
|
||||
133
opencl/gpu_narrowphase/host/btTriangleIndexVertexArray.h
Normal file
133
opencl/gpu_narrowphase/host/btTriangleIndexVertexArray.h
Normal file
@@ -0,0 +1,133 @@
|
||||
/*
|
||||
Bullet Continuous Collision Detection and Physics Library
|
||||
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
|
||||
|
||||
This software is provided 'as-is', without any express or implied warranty.
|
||||
In no event will the authors be held liable for any damages arising from the use of this software.
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it freely,
|
||||
subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
*/
|
||||
|
||||
#ifndef BT_TRIANGLE_INDEX_VERTEX_ARRAY_H
|
||||
#define BT_TRIANGLE_INDEX_VERTEX_ARRAY_H
|
||||
|
||||
#include "btStridingMeshInterface.h"
|
||||
#include "BulletCommon/btAlignedObjectArray.h"
|
||||
#include "BulletCommon/btScalar.h"
|
||||
|
||||
|
||||
///The btIndexedMesh indexes a single vertex and index array. Multiple btIndexedMesh objects can be passed into a btTriangleIndexVertexArray using addIndexedMesh.
|
||||
///Instead of the number of indices, we pass the number of triangles.
|
||||
ATTRIBUTE_ALIGNED16( struct) btIndexedMesh
|
||||
{
|
||||
BT_DECLARE_ALIGNED_ALLOCATOR();
|
||||
|
||||
int m_numTriangles;
|
||||
const unsigned char * m_triangleIndexBase;
|
||||
// Size in byte of the indices for one triangle (3*sizeof(index_type) if the indices are tightly packed)
|
||||
int m_triangleIndexStride;
|
||||
int m_numVertices;
|
||||
const unsigned char * m_vertexBase;
|
||||
// Size of a vertex, in bytes
|
||||
int m_vertexStride;
|
||||
|
||||
// The index type is set when adding an indexed mesh to the
|
||||
// btTriangleIndexVertexArray, do not set it manually
|
||||
PHY_ScalarType m_indexType;
|
||||
|
||||
// The vertex type has a default type similar to Bullet's precision mode (float or double)
|
||||
// but can be set manually if you for example run Bullet with double precision but have
|
||||
// mesh data in single precision..
|
||||
PHY_ScalarType m_vertexType;
|
||||
|
||||
|
||||
btIndexedMesh()
|
||||
:m_indexType(PHY_INTEGER),
|
||||
#ifdef BT_USE_DOUBLE_PRECISION
|
||||
m_vertexType(PHY_DOUBLE)
|
||||
#else // BT_USE_DOUBLE_PRECISION
|
||||
m_vertexType(PHY_FLOAT)
|
||||
#endif // BT_USE_DOUBLE_PRECISION
|
||||
{
|
||||
}
|
||||
}
|
||||
;
|
||||
|
||||
|
||||
typedef btAlignedObjectArray<btIndexedMesh> IndexedMeshArray;
|
||||
|
||||
///The btTriangleIndexVertexArray allows to access multiple triangle meshes, by indexing into existing triangle/index arrays.
|
||||
///Additional meshes can be added using addIndexedMesh
|
||||
///No duplcate is made of the vertex/index data, it only indexes into external vertex/index arrays.
|
||||
///So keep those arrays around during the lifetime of this btTriangleIndexVertexArray.
|
||||
ATTRIBUTE_ALIGNED16( class) btTriangleIndexVertexArray : public btStridingMeshInterface
|
||||
{
|
||||
protected:
|
||||
IndexedMeshArray m_indexedMeshes;
|
||||
int m_pad[2];
|
||||
mutable int m_hasAabb; // using int instead of bool to maintain alignment
|
||||
mutable btVector3 m_aabbMin;
|
||||
mutable btVector3 m_aabbMax;
|
||||
|
||||
public:
|
||||
|
||||
BT_DECLARE_ALIGNED_ALLOCATOR();
|
||||
|
||||
btTriangleIndexVertexArray() : m_hasAabb(0)
|
||||
{
|
||||
}
|
||||
|
||||
virtual ~btTriangleIndexVertexArray();
|
||||
|
||||
//just to be backwards compatible
|
||||
btTriangleIndexVertexArray(int numTriangles,int* triangleIndexBase,int triangleIndexStride,int numVertices,btScalar* vertexBase,int vertexStride);
|
||||
|
||||
void addIndexedMesh(const btIndexedMesh& mesh, PHY_ScalarType indexType = PHY_INTEGER)
|
||||
{
|
||||
m_indexedMeshes.push_back(mesh);
|
||||
m_indexedMeshes[m_indexedMeshes.size()-1].m_indexType = indexType;
|
||||
}
|
||||
|
||||
|
||||
virtual void getLockedVertexIndexBase(unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0);
|
||||
|
||||
virtual void getLockedReadOnlyVertexIndexBase(const unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,const unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0) const;
|
||||
|
||||
/// unLockVertexBase finishes the access to a subpart of the triangle mesh
|
||||
/// make a call to unLockVertexBase when the read and write access (using getLockedVertexIndexBase) is finished
|
||||
virtual void unLockVertexBase(int subpart) {(void)subpart;}
|
||||
|
||||
virtual void unLockReadOnlyVertexBase(int subpart) const {(void)subpart;}
|
||||
|
||||
/// getNumSubParts returns the number of seperate subparts
|
||||
/// each subpart has a continuous array of vertices and indices
|
||||
virtual int getNumSubParts() const {
|
||||
return (int)m_indexedMeshes.size();
|
||||
}
|
||||
|
||||
IndexedMeshArray& getIndexedMeshArray()
|
||||
{
|
||||
return m_indexedMeshes;
|
||||
}
|
||||
|
||||
const IndexedMeshArray& getIndexedMeshArray() const
|
||||
{
|
||||
return m_indexedMeshes;
|
||||
}
|
||||
|
||||
virtual void preallocateVertices(int numverts){(void) numverts;}
|
||||
virtual void preallocateIndices(int numindices){(void) numindices;}
|
||||
|
||||
virtual bool hasPremadeAabb() const;
|
||||
virtual void setPremadeAabb(const btVector3& aabbMin, const btVector3& aabbMax ) const;
|
||||
virtual void getPremadeAabb(btVector3* aabbMin, btVector3* aabbMax ) const;
|
||||
|
||||
}
|
||||
;
|
||||
|
||||
#endif //BT_TRIANGLE_INDEX_VERTEX_ARRAY_H
|
||||
Reference in New Issue
Block a user