reorder files, in preparation for Bullet 3 -> Bullet 2 merge

erwincoumans
2013-04-29 19:04:08 -07:00
parent 55b69201a9
commit 3ac332f3a7
162 changed files with 215 additions and 3070 deletions


@@ -0,0 +1,18 @@
#ifndef B3_BVH_INFO_H
#define B3_BVH_INFO_H
#include "Bullet3Common/b3Vector3.h"
struct b3BvhInfo
{
b3Vector3 m_aabbMin;
b3Vector3 m_aabbMax;
b3Vector3 m_quantization;
int m_numNodes;
int m_numSubTrees;
int m_nodeOffset;
int m_subTreeOffset;
};
#endif //B3_BVH_INFO_H
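As a hedged illustration (not part of this commit), the descriptor above could be filled in from the b3QuantizedBvh introduced later in this reorder; the node/subtree offsets are assumed to index into shared, concatenated GPU arrays:

// Hypothetical helper that records one tree's metadata for upload.
b3BvhInfo makeBvhInfo(b3QuantizedBvh& bvh, int nodeOffset, int subTreeOffset)
{
	b3BvhInfo info;
	info.m_aabbMin = bvh.m_bvhAabbMin;
	info.m_aabbMax = bvh.m_bvhAabbMax;
	info.m_quantization = bvh.m_bvhQuantization;
	info.m_numNodes = bvh.getQuantizedNodeArray().size();
	info.m_numSubTrees = bvh.getSubtreeInfoArray().size();
	info.m_nodeOffset = nodeOffset;       // start of this tree's nodes in the shared array (assumption)
	info.m_subTreeOffset = subTreeOffset; // start of its subtree headers (assumption)
	return info;
}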


@@ -0,0 +1,53 @@
#ifndef B3_COLLIDABLE_H
#define B3_COLLIDABLE_H
enum b3ShapeTypes
{
SHAPE_HEIGHT_FIELD=1,
SHAPE_CONVEX_HULL=3,
SHAPE_PLANE=4,
SHAPE_CONCAVE_TRIMESH=5,
SHAPE_COMPOUND_OF_CONVEX_HULLS=6,
SHAPE_SPHERE=7,
MAX_NUM_SHAPE_TYPES,
};
struct b3Collidable
{
union {
int m_numChildShapes;
int m_bvhIndex;
};
float m_radius;
int m_shapeType;
int m_shapeIndex;
};
struct b3CollidableNew
{
short int m_shapeType;
short int m_numShapes;
int m_shapeIndex;
};
struct b3GpuChildShape
{
float m_childPosition[4];
float m_childOrientation[4];
int m_shapeIndex;
int m_unused0;
int m_unused1;
int m_unused2;
};
struct b3CompoundOverlappingPair
{
int m_bodyIndexA;
int m_bodyIndexB;
// int m_pairType;
int m_childShapeIndexA;
int m_childShapeIndexB;
};
#endif //B3_COLLIDABLE_H
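A minimal host-side sketch (illustrative only; the index values are placeholders, not part of this commit) of filling in the plain b3Collidable layout for a convex hull and a sphere:

b3Collidable hullCol;
hullCol.m_shapeType = SHAPE_CONVEX_HULL;
hullCol.m_shapeIndex = 0;    // placeholder: index into the convex polyhedron array
hullCol.m_radius = 0.f;

b3Collidable sphereCol;
sphereCol.m_shapeType = SHAPE_SPHERE;
sphereCol.m_radius = 0.5f;   // for spheres only the radius matters (assumption)
sphereCol.m_shapeIndex = -1;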

File diff suppressed because it is too large.


@@ -0,0 +1,99 @@
#ifndef _CONVEX_HULL_CONTACT_H
#define _CONVEX_HULL_CONTACT_H
#include "Bullet3OpenCL/ParallelPrimitives/b3OpenCLArray.h"
#include "Bullet3Collision/NarrowPhaseCollision/b3RigidBodyCL.h"
#include "Bullet3Common/b3AlignedObjectArray.h"
#include "b3ConvexUtility.h"
#include "b3ConvexPolyhedronCL.h"
#include "b3Collidable.h"
#include "Bullet3Collision/NarrowPhaseCollision/b3Contact4.h"
#include "Bullet3Common/b3Int2.h"
#include "Bullet3Common/b3Int4.h"
#include "b3OptimizedBvh.h"
#include "b3BvhInfo.h"
//#include "../../dynamics/basic_demo/Stubs/ChNarrowPhase.h"
struct b3YetAnotherAabb
{
union
{
float m_min[4];
int m_minIndices[4];
};
union
{
float m_max[4];
//int m_signedMaxIndices[4];
//unsigned int m_unsignedMaxIndices[4];
};
};
struct GpuSatCollision
{
cl_context m_context;
cl_device_id m_device;
cl_command_queue m_queue;
cl_kernel m_findSeparatingAxisKernel;
cl_kernel m_findConcaveSeparatingAxisKernel;
cl_kernel m_findCompoundPairsKernel;
cl_kernel m_processCompoundPairsKernel;
cl_kernel m_clipHullHullKernel;
cl_kernel m_clipCompoundsHullHullKernel;
cl_kernel m_clipFacesAndContactReductionKernel;
cl_kernel m_findClippingFacesKernel;
cl_kernel m_clipHullHullConcaveConvexKernel;
cl_kernel m_extractManifoldAndAddContactKernel;
cl_kernel m_newContactReductionKernel;
cl_kernel m_bvhTraversalKernel;
cl_kernel m_primitiveContactsKernel;
cl_kernel m_findConcaveSphereContactsKernel;
cl_kernel m_processCompoundPairsPrimitivesKernel;
b3OpenCLArray<int> m_totalContactsOut;
GpuSatCollision(cl_context ctx,cl_device_id device, cl_command_queue q );
virtual ~GpuSatCollision();
void computeConvexConvexContactsGPUSAT( const b3OpenCLArray<b3Int2>* pairs, int nPairs,
const b3OpenCLArray<b3RigidBodyCL>* bodyBuf,
b3OpenCLArray<b3Contact4>* contactOut, int& nContacts,
int maxContactCapacity,
const b3OpenCLArray<b3ConvexPolyhedronCL>& hostConvexData,
const b3OpenCLArray<b3Vector3>& vertices,
const b3OpenCLArray<b3Vector3>& uniqueEdges,
const b3OpenCLArray<b3GpuFace>& faces,
const b3OpenCLArray<int>& indices,
const b3OpenCLArray<b3Collidable>& gpuCollidables,
const b3OpenCLArray<b3GpuChildShape>& gpuChildShapes,
const b3OpenCLArray<b3YetAnotherAabb>& clAabbs,
b3OpenCLArray<b3Vector3>& worldVertsB1GPU,
b3OpenCLArray<b3Int4>& clippingFacesOutGPU,
b3OpenCLArray<b3Vector3>& worldNormalsAGPU,
b3OpenCLArray<b3Vector3>& worldVertsA1GPU,
b3OpenCLArray<b3Vector3>& worldVertsB2GPU,
b3AlignedObjectArray<class b3OptimizedBvh*>& bvhData,
b3OpenCLArray<b3QuantizedBvhNode>* treeNodesGPU,
b3OpenCLArray<b3BvhSubtreeInfo>* subTreesGPU,
b3OpenCLArray<b3BvhInfo>* bvhInfo,
int numObjects,
int maxTriConvexPairCapacity,
b3OpenCLArray<b3Int4>& triangleConvexPairs,
int& numTriConvexPairsOut
);
};
#endif //_CONVEX_HULL_CONTACT_H
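A hedged usage sketch: the struct presumably creates its kernels in the constructor, so it would be created once per OpenCL context and reused for every narrow-phase pass; ctx, device and queue below are assumed to come from the usual OpenCL setup.

GpuSatCollision* sat = new GpuSatCollision(ctx, device, queue);
// per frame: sat->computeConvexConvexContactsGPUSAT(pairs, numPairs, bodies, contactsOut, numContacts, ...)
// with the shape/BVH buffers listed in the declaration above
delete sat;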


@@ -0,0 +1,64 @@
#ifndef CONVEX_POLYHEDRON_CL
#define CONVEX_POLYHEDRON_CL
#include "Bullet3Common/b3Transform.h"
struct b3GpuFace
{
b3Vector4 m_plane;
int m_indexOffset;
int m_numIndices;
};
B3_ATTRIBUTE_ALIGNED16(struct) b3ConvexPolyhedronCL
{
b3Vector3 m_localCenter;
b3Vector3 m_extents;
b3Vector3 mC;
b3Vector3 mE;
b3Scalar m_radius;
int m_faceOffset;
int m_numFaces;
int m_numVertices;
int m_vertexOffset;
int m_uniqueEdgesOffset;
int m_numUniqueEdges;
int m_unused;
inline void project(const b3Transform& trans, const b3Vector3& dir, const b3AlignedObjectArray<b3Vector3>& vertices, b3Scalar& min, b3Scalar& max) const
{
min = FLT_MAX;
max = -FLT_MAX;
int numVerts = m_numVertices;
const b3Vector3 localDir = trans.getBasis().transpose()*dir;
const b3Vector3 localDi2 = b3QuatRotate(trans.getRotation().inverse(),dir);
b3Scalar offset = trans.getOrigin().dot(dir);
for(int i=0;i<numVerts;i++)
{
//b3Vector3 pt = trans * vertices[m_vertexOffset+i];
//b3Scalar dp = pt.dot(dir);
b3Scalar dp = vertices[m_vertexOffset+i].dot(localDir);
//b3Assert(dp==dpL);
if(dp < min) min = dp;
if(dp > max) max = dp;
}
if(min>max)
{
b3Scalar tmp = min;
min = max;
max = tmp;
}
min += offset;
max += offset;
}
};
#endif //CONVEX_POLYHEDRON_CL
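The project() method above is the building block of the separating-axis test: project both hulls onto a candidate axis and compare the two intervals. A hedged sketch of that comparison (all arguments are assumed to be supplied by the caller):

// Returns false if 'axis' separates the hulls; otherwise 'depth' is the smallest overlap.
static bool testSepAxis(const b3ConvexPolyhedronCL& hullA, const b3ConvexPolyhedronCL& hullB,
	const b3Transform& transA, const b3Transform& transB, const b3Vector3& axis,
	const b3AlignedObjectArray<b3Vector3>& verticesA,
	const b3AlignedObjectArray<b3Vector3>& verticesB,
	b3Scalar& depth)
{
	b3Scalar minA, maxA, minB, maxB;
	hullA.project(transA, axis, verticesA, minA, maxA);
	hullB.project(transB, axis, verticesB, minB, maxB);
	if (maxA < minB || maxB < minA)
		return false; // separating axis found
	b3Scalar d0 = maxA - minB;
	b3Scalar d1 = maxB - minA;
	depth = d0 < d1 ? d0 : d1; // smallest overlap along this axis
	return true;
}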


@@ -0,0 +1,520 @@
/*
Copyright (c) 2012 Advanced Micro Devices, Inc.
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
//Originally written by Erwin Coumans
#include "b3ConvexUtility.h"
#include "Bullet3Geometry/b3ConvexHullComputer.h"
#include "Bullet3Geometry/b3GrahamScan2dConvexHull.h"
#include "Bullet3Common/b3Quaternion.h"
#include "Bullet3Common/b3HashMap.h"
#include "b3ConvexPolyhedronCL.h"
b3ConvexUtility::~b3ConvexUtility()
{
}
bool b3ConvexUtility::initializePolyhedralFeatures(const b3Vector3* orgVertices, int numPoints, bool mergeCoplanarTriangles)
{
b3ConvexHullComputer conv;
conv.compute(&orgVertices[0].getX(), sizeof(b3Vector3),numPoints,0.f,0.f);
b3AlignedObjectArray<b3Vector3> faceNormals;
int numFaces = conv.faces.size();
faceNormals.resize(numFaces);
b3ConvexHullComputer* convexUtil = &conv;
b3AlignedObjectArray<b3MyFace> tmpFaces;
tmpFaces.resize(numFaces);
int numVertices = convexUtil->vertices.size();
m_vertices.resize(numVertices);
for (int p=0;p<numVertices;p++)
{
m_vertices[p] = convexUtil->vertices[p];
}
for (int i=0;i<numFaces;i++)
{
int face = convexUtil->faces[i];
//printf("face=%d\n",face);
const b3ConvexHullComputer::Edge* firstEdge = &convexUtil->edges[face];
const b3ConvexHullComputer::Edge* edge = firstEdge;
b3Vector3 edges[3];
int numEdges = 0;
//compute face normals
do
{
int src = edge->getSourceVertex();
tmpFaces[i].m_indices.push_back(src);
int targ = edge->getTargetVertex();
b3Vector3 wa = convexUtil->vertices[src];
b3Vector3 wb = convexUtil->vertices[targ];
b3Vector3 newEdge = wb-wa;
newEdge.normalize();
if (numEdges<2)
edges[numEdges++] = newEdge;
edge = edge->getNextEdgeOfFace();
} while (edge!=firstEdge);
b3Scalar planeEq = 1e30f;
if (numEdges==2)
{
faceNormals[i] = edges[0].cross(edges[1]);
faceNormals[i].normalize();
tmpFaces[i].m_plane[0] = faceNormals[i].getX();
tmpFaces[i].m_plane[1] = faceNormals[i].getY();
tmpFaces[i].m_plane[2] = faceNormals[i].getZ();
tmpFaces[i].m_plane[3] = planeEq;
}
else
{
b3Assert(0);//degenerate?
faceNormals[i].setZero();
}
for (int v=0;v<tmpFaces[i].m_indices.size();v++)
{
b3Scalar eq = m_vertices[tmpFaces[i].m_indices[v]].dot(faceNormals[i]);
if (planeEq>eq)
{
planeEq=eq;
}
}
tmpFaces[i].m_plane[3] = -planeEq;
}
//merge coplanar faces and copy them to m_polyhedron
b3Scalar faceWeldThreshold= 0.999f;
b3AlignedObjectArray<int> todoFaces;
for (int i=0;i<tmpFaces.size();i++)
todoFaces.push_back(i);
while (todoFaces.size())
{
b3AlignedObjectArray<int> coplanarFaceGroup;
int refFace = todoFaces[todoFaces.size()-1];
coplanarFaceGroup.push_back(refFace);
b3MyFace& faceA = tmpFaces[refFace];
todoFaces.pop_back();
b3Vector3 faceNormalA(faceA.m_plane[0],faceA.m_plane[1],faceA.m_plane[2]);
for (int j=todoFaces.size()-1;j>=0;j--)
{
int i = todoFaces[j];
b3MyFace& faceB = tmpFaces[i];
b3Vector3 faceNormalB(faceB.m_plane[0],faceB.m_plane[1],faceB.m_plane[2]);
if (faceNormalA.dot(faceNormalB)>faceWeldThreshold)
{
coplanarFaceGroup.push_back(i);
todoFaces.remove(i);
}
}
bool did_merge = false;
if (coplanarFaceGroup.size()>1)
{
//do the merge: use Graham Scan 2d convex hull
b3AlignedObjectArray<b3GrahamVector3> orgpoints;
b3Vector3 averageFaceNormal(0,0,0);
for (int i=0;i<coplanarFaceGroup.size();i++)
{
// m_polyhedron->m_faces.push_back(tmpFaces[coplanarFaceGroup[i]]);
b3MyFace& face = tmpFaces[coplanarFaceGroup[i]];
b3Vector3 faceNormal(face.m_plane[0],face.m_plane[1],face.m_plane[2]);
averageFaceNormal+=faceNormal;
for (int f=0;f<face.m_indices.size();f++)
{
int orgIndex = face.m_indices[f];
b3Vector3 pt = m_vertices[orgIndex];
bool found = false;
for (int i=0;i<orgpoints.size();i++)
{
//if ((orgpoints[i].m_orgIndex == orgIndex) || ((rotatedPt-orgpoints[i]).length2()<0.0001))
if (orgpoints[i].m_orgIndex == orgIndex)
{
found=true;
break;
}
}
if (!found)
orgpoints.push_back(b3GrahamVector3(pt,orgIndex));
}
}
b3MyFace combinedFace;
for (int i=0;i<4;i++)
combinedFace.m_plane[i] = tmpFaces[coplanarFaceGroup[0]].m_plane[i];
b3AlignedObjectArray<b3GrahamVector3> hull;
averageFaceNormal.normalize();
b3GrahamScanConvexHull2D(orgpoints,hull,averageFaceNormal);
for (int i=0;i<hull.size();i++)
{
combinedFace.m_indices.push_back(hull[i].m_orgIndex);
for(int k = 0; k < orgpoints.size(); k++)
{
if(orgpoints[k].m_orgIndex == hull[i].m_orgIndex)
{
orgpoints[k].m_orgIndex = -1; // invalidate...
break;
}
}
}
// are there rejected vertices?
bool reject_merge = false;
for(int i = 0; i < orgpoints.size(); i++) {
if(orgpoints[i].m_orgIndex == -1)
continue; // this is in the hull...
// this vertex is rejected -- is anybody else using this vertex?
for(int j = 0; j < tmpFaces.size(); j++) {
b3MyFace& face = tmpFaces[j];
// is this a face of the current coplanar group?
bool is_in_current_group = false;
for(int k = 0; k < coplanarFaceGroup.size(); k++) {
if(coplanarFaceGroup[k] == j) {
is_in_current_group = true;
break;
}
}
if(is_in_current_group) // ignore this face...
continue;
// does this face use this rejected vertex?
for(int v = 0; v < face.m_indices.size(); v++) {
if(face.m_indices[v] == orgpoints[i].m_orgIndex) {
// this rejected vertex is used in another face -- reject merge
reject_merge = true;
break;
}
}
if(reject_merge)
break;
}
if(reject_merge)
break;
}
if (!reject_merge)
{
// do this merge!
did_merge = true;
m_faces.push_back(combinedFace);
}
}
if(!did_merge)
{
for (int i=0;i<coplanarFaceGroup.size();i++)
{
b3MyFace face = tmpFaces[coplanarFaceGroup[i]];
m_faces.push_back(face);
}
}
}
initialize();
return true;
}
inline bool IsAlmostZero(const b3Vector3& v)
{
if(fabsf(v.getX())>1e-6 || fabsf(v.getY())>1e-6 || fabsf(v.getZ())>1e-6) return false;
return true;
}
struct b3InternalVertexPair
{
b3InternalVertexPair(short int v0,short int v1)
:m_v0(v0),
m_v1(v1)
{
if (m_v1>m_v0)
b3Swap(m_v0,m_v1);
}
short int m_v0;
short int m_v1;
int getHash() const
{
return m_v0+(m_v1<<16);
}
bool equals(const b3InternalVertexPair& other) const
{
return m_v0==other.m_v0 && m_v1==other.m_v1;
}
};
struct b3InternalEdge
{
b3InternalEdge()
:m_face0(-1),
m_face1(-1)
{
}
short int m_face0;
short int m_face1;
};
//
#ifdef TEST_INTERNAL_OBJECTS
bool b3ConvexUtility::testContainment() const
{
for(int p=0;p<8;p++)
{
b3Vector3 LocalPt;
if(p==0) LocalPt = m_localCenter + b3Vector3(m_extents[0], m_extents[1], m_extents[2]);
else if(p==1) LocalPt = m_localCenter + b3Vector3(m_extents[0], m_extents[1], -m_extents[2]);
else if(p==2) LocalPt = m_localCenter + b3Vector3(m_extents[0], -m_extents[1], m_extents[2]);
else if(p==3) LocalPt = m_localCenter + b3Vector3(m_extents[0], -m_extents[1], -m_extents[2]);
else if(p==4) LocalPt = m_localCenter + b3Vector3(-m_extents[0], m_extents[1], m_extents[2]);
else if(p==5) LocalPt = m_localCenter + b3Vector3(-m_extents[0], m_extents[1], -m_extents[2]);
else if(p==6) LocalPt = m_localCenter + b3Vector3(-m_extents[0], -m_extents[1], m_extents[2]);
else if(p==7) LocalPt = m_localCenter + b3Vector3(-m_extents[0], -m_extents[1], -m_extents[2]);
for(int i=0;i<m_faces.size();i++)
{
const b3Vector3 Normal(m_faces[i].m_plane[0], m_faces[i].m_plane[1], m_faces[i].m_plane[2]);
const b3Scalar d = LocalPt.dot(Normal) + m_faces[i].m_plane[3];
if(d>0.0f)
return false;
}
}
return true;
}
#endif
void b3ConvexUtility::initialize()
{
b3HashMap<b3InternalVertexPair,b3InternalEdge> edges;
b3Scalar TotalArea = 0.0f;
m_localCenter.setValue(0, 0, 0);
for(int i=0;i<m_faces.size();i++)
{
int numVertices = m_faces[i].m_indices.size();
int NbTris = numVertices;
for(int j=0;j<NbTris;j++)
{
int k = (j+1)%numVertices;
b3InternalVertexPair vp(m_faces[i].m_indices[j],m_faces[i].m_indices[k]);
b3InternalEdge* edptr = edges.find(vp);
b3Vector3 edge = m_vertices[vp.m_v1]-m_vertices[vp.m_v0];
edge.normalize();
bool found = false;
b3Vector3 diff,diff2;
for (int p=0;p<m_uniqueEdges.size();p++)
{
diff = m_uniqueEdges[p]-edge;
diff2 = m_uniqueEdges[p]+edge;
// if ((diff.length2()==0.f) ||
// (diff2.length2()==0.f))
if (IsAlmostZero(diff) ||
IsAlmostZero(diff2))
{
found = true;
break;
}
}
if (!found)
{
m_uniqueEdges.push_back(edge);
}
if (edptr)
{
//TBD: figure out why I added this assert
// b3Assert(edptr->m_face0>=0);
// b3Assert(edptr->m_face1<0);
edptr->m_face1 = i;
} else
{
b3InternalEdge ed;
ed.m_face0 = i;
edges.insert(vp,ed);
}
}
}
#ifdef USE_CONNECTED_FACES
for(int i=0;i<m_faces.size();i++)
{
int numVertices = m_faces[i].m_indices.size();
m_faces[i].m_connectedFaces.resize(numVertices);
for(int j=0;j<numVertices;j++)
{
int k = (j+1)%numVertices;
b3InternalVertexPair vp(m_faces[i].m_indices[j],m_faces[i].m_indices[k]);
b3InternalEdge* edptr = edges.find(vp);
b3Assert(edptr);
b3Assert(edptr->m_face0>=0);
b3Assert(edptr->m_face1>=0);
int connectedFace = (edptr->m_face0==i)?edptr->m_face1:edptr->m_face0;
m_faces[i].m_connectedFaces[j] = connectedFace;
}
}
#endif//USE_CONNECTED_FACES
for(int i=0;i<m_faces.size();i++)
{
int numVertices = m_faces[i].m_indices.size();
int NbTris = numVertices-2;
const b3Vector3& p0 = m_vertices[m_faces[i].m_indices[0]];
for(int j=1;j<=NbTris;j++)
{
int k = (j+1)%numVertices;
const b3Vector3& p1 = m_vertices[m_faces[i].m_indices[j]];
const b3Vector3& p2 = m_vertices[m_faces[i].m_indices[k]];
b3Scalar Area = ((p0 - p1).cross(p0 - p2)).length() * 0.5f;
b3Vector3 Center = (p0+p1+p2)/3.0f;
m_localCenter += Area * Center;
TotalArea += Area;
}
}
m_localCenter /= TotalArea;
#ifdef TEST_INTERNAL_OBJECTS
if(1)
{
m_radius = FLT_MAX;
for(int i=0;i<m_faces.size();i++)
{
const b3Vector3 Normal(m_faces[i].m_plane[0], m_faces[i].m_plane[1], m_faces[i].m_plane[2]);
const b3Scalar dist = b3Fabs(m_localCenter.dot(Normal) + m_faces[i].m_plane[3]);
if(dist<m_radius)
m_radius = dist;
}
b3Scalar MinX = FLT_MAX;
b3Scalar MinY = FLT_MAX;
b3Scalar MinZ = FLT_MAX;
b3Scalar MaxX = -FLT_MAX;
b3Scalar MaxY = -FLT_MAX;
b3Scalar MaxZ = -FLT_MAX;
for(int i=0; i<m_vertices.size(); i++)
{
const b3Vector3& pt = m_vertices[i];
if(pt.getX()<MinX) MinX = pt.getX();
if(pt.getX()>MaxX) MaxX = pt.getX();
if(pt.getY()<MinY) MinY = pt.getY();
if(pt.getY()>MaxY) MaxY = pt.getY();
if(pt.getZ()<MinZ) MinZ = pt.getZ();
if(pt.getZ()>MaxZ) MaxZ = pt.getZ();
}
mC.setValue(MaxX+MinX, MaxY+MinY, MaxZ+MinZ);
mE.setValue(MaxX-MinX, MaxY-MinY, MaxZ-MinZ);
// const b3Scalar r = m_radius / sqrtf(2.0f);
const b3Scalar r = m_radius / sqrtf(3.0f);
const int LargestExtent = mE.maxAxis();
const b3Scalar Step = (mE[LargestExtent]*0.5f - r)/1024.0f;
m_extents[0] = m_extents[1] = m_extents[2] = r;
m_extents[LargestExtent] = mE[LargestExtent]*0.5f;
bool FoundBox = false;
for(int j=0;j<1024;j++)
{
if(testContainment())
{
FoundBox = true;
break;
}
m_extents[LargestExtent] -= Step;
}
if(!FoundBox)
{
m_extents[0] = m_extents[1] = m_extents[2] = r;
}
else
{
// Refine the box
const b3Scalar Step = (m_radius - r)/1024.0f;
const int e0 = (1<<LargestExtent) & 3;
const int e1 = (1<<e0) & 3;
for(int j=0;j<1024;j++)
{
const b3Scalar Saved0 = m_extents[e0];
const b3Scalar Saved1 = m_extents[e1];
m_extents[e0] += Step;
m_extents[e1] += Step;
if(!testContainment())
{
m_extents[e0] = Saved0;
m_extents[e1] = Saved1;
break;
}
}
}
}
#endif
}


@@ -0,0 +1,62 @@
/*
Copyright (c) 2012 Advanced Micro Devices, Inc.
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
//Originally written by Erwin Coumans
#ifndef _BT_CONVEX_UTILITY_H
#define _BT_CONVEX_UTILITY_H
#include "Bullet3Common/b3AlignedObjectArray.h"
#include "Bullet3Common/b3Transform.h"
#include "b3ConvexPolyhedronCL.h"
struct b3MyFace
{
b3AlignedObjectArray<int> m_indices;
b3Scalar m_plane[4];
};
B3_ATTRIBUTE_ALIGNED16(class) b3ConvexUtility
{
public:
B3_DECLARE_ALIGNED_ALLOCATOR();
b3Vector3 m_localCenter;
b3Vector3 m_extents;
b3Vector3 mC;
b3Vector3 mE;
b3Scalar m_radius;
b3AlignedObjectArray<b3Vector3> m_vertices;
b3AlignedObjectArray<b3MyFace> m_faces;
b3AlignedObjectArray<b3Vector3> m_uniqueEdges;
b3ConvexUtility()
{
}
virtual ~b3ConvexUtility();
bool initializePolyhedralFeatures(const b3Vector3* orgVertices, int numVertices, bool mergeCoplanarTriangles=true);
void initialize();
bool testContainment() const;
};
#endif
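A minimal sketch of driving the utility above: build the polyhedral features (vertices, faces, unique edges) for a small box from its eight corner points. The point set is illustrative.

// e.g. inside some setup function:
b3AlignedObjectArray<b3Vector3> points;
for (int x = -1; x <= 1; x += 2)
	for (int y = -1; y <= 1; y += 2)
		for (int z = -1; z <= 1; z += 2)
			points.push_back(b3Vector3(x * 0.5f, y * 0.5f, z * 0.5f));

b3ConvexUtility util;
// fills util.m_vertices, util.m_faces and util.m_uniqueEdges; coplanar triangles are merged by default
bool ok = util.initializePolyhedralFeatures(&points[0], points.size(), true);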


@@ -0,0 +1,390 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "b3OptimizedBvh.h"
#include "b3StridingMeshInterface.h"
#include "Bullet3Geometry/b3AabbUtil.h"
b3OptimizedBvh::b3OptimizedBvh()
{
}
b3OptimizedBvh::~b3OptimizedBvh()
{
}
void b3OptimizedBvh::build(b3StridingMeshInterface* triangles, bool useQuantizedAabbCompression, const b3Vector3& bvhAabbMin, const b3Vector3& bvhAabbMax)
{
m_useQuantization = useQuantizedAabbCompression;
// NodeArray triangleNodes;
struct NodeTriangleCallback : public b3InternalTriangleIndexCallback
{
NodeArray& m_triangleNodes;
NodeTriangleCallback& operator=(NodeTriangleCallback& other)
{
m_triangleNodes.copyFromArray(other.m_triangleNodes);
return *this;
}
NodeTriangleCallback(NodeArray& triangleNodes)
:m_triangleNodes(triangleNodes)
{
}
virtual void internalProcessTriangleIndex(b3Vector3* triangle,int partId,int triangleIndex)
{
b3OptimizedBvhNode node;
b3Vector3 aabbMin,aabbMax;
aabbMin.setValue(b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT));
aabbMax.setValue(b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT));
aabbMin.setMin(triangle[0]);
aabbMax.setMax(triangle[0]);
aabbMin.setMin(triangle[1]);
aabbMax.setMax(triangle[1]);
aabbMin.setMin(triangle[2]);
aabbMax.setMax(triangle[2]);
//with quantization?
node.m_aabbMinOrg = aabbMin;
node.m_aabbMaxOrg = aabbMax;
node.m_escapeIndex = -1;
//for child nodes
node.m_subPart = partId;
node.m_triangleIndex = triangleIndex;
m_triangleNodes.push_back(node);
}
};
struct QuantizedNodeTriangleCallback : public b3InternalTriangleIndexCallback
{
QuantizedNodeArray& m_triangleNodes;
const b3QuantizedBvh* m_optimizedTree; // for quantization
QuantizedNodeTriangleCallback& operator=(QuantizedNodeTriangleCallback& other)
{
m_triangleNodes.copyFromArray(other.m_triangleNodes);
m_optimizedTree = other.m_optimizedTree;
return *this;
}
QuantizedNodeTriangleCallback(QuantizedNodeArray& triangleNodes,const b3QuantizedBvh* tree)
:m_triangleNodes(triangleNodes),m_optimizedTree(tree)
{
}
virtual void internalProcessTriangleIndex(b3Vector3* triangle,int partId,int triangleIndex)
{
// The partId and triangle index must fit in the same (positive) integer
b3Assert(partId < (1<<MAX_NUM_PARTS_IN_BITS));
b3Assert(triangleIndex < (1<<(31-MAX_NUM_PARTS_IN_BITS)));
//negative indices are reserved for escapeIndex
b3Assert(triangleIndex>=0);
b3QuantizedBvhNode node;
b3Vector3 aabbMin,aabbMax;
aabbMin.setValue(b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT));
aabbMax.setValue(b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT));
aabbMin.setMin(triangle[0]);
aabbMax.setMax(triangle[0]);
aabbMin.setMin(triangle[1]);
aabbMax.setMax(triangle[1]);
aabbMin.setMin(triangle[2]);
aabbMax.setMax(triangle[2]);
//PCK: add these checks for zero dimensions of aabb
const b3Scalar MIN_AABB_DIMENSION = b3Scalar(0.002);
const b3Scalar MIN_AABB_HALF_DIMENSION = b3Scalar(0.001);
if (aabbMax.getX() - aabbMin.getX() < MIN_AABB_DIMENSION)
{
aabbMax.setX(aabbMax.getX() + MIN_AABB_HALF_DIMENSION);
aabbMin.setX(aabbMin.getX() - MIN_AABB_HALF_DIMENSION);
}
if (aabbMax.getY() - aabbMin.getY() < MIN_AABB_DIMENSION)
{
aabbMax.setY(aabbMax.getY() + MIN_AABB_HALF_DIMENSION);
aabbMin.setY(aabbMin.getY() - MIN_AABB_HALF_DIMENSION);
}
if (aabbMax.getZ() - aabbMin.getZ() < MIN_AABB_DIMENSION)
{
aabbMax.setZ(aabbMax.getZ() + MIN_AABB_HALF_DIMENSION);
aabbMin.setZ(aabbMin.getZ() - MIN_AABB_HALF_DIMENSION);
}
m_optimizedTree->quantize(&node.m_quantizedAabbMin[0],aabbMin,0);
m_optimizedTree->quantize(&node.m_quantizedAabbMax[0],aabbMax,1);
node.m_escapeIndexOrTriangleIndex = (partId<<(31-MAX_NUM_PARTS_IN_BITS)) | triangleIndex;
m_triangleNodes.push_back(node);
}
};
int numLeafNodes = 0;
if (m_useQuantization)
{
//initialize quantization values
setQuantizationValues(bvhAabbMin,bvhAabbMax);
QuantizedNodeTriangleCallback callback(m_quantizedLeafNodes,this);
triangles->InternalProcessAllTriangles(&callback,m_bvhAabbMin,m_bvhAabbMax);
//now we have an array of leaf nodes in m_quantizedLeafNodes
numLeafNodes = m_quantizedLeafNodes.size();
m_quantizedContiguousNodes.resize(2*numLeafNodes);
} else
{
NodeTriangleCallback callback(m_leafNodes);
b3Vector3 aabbMin(b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT));
b3Vector3 aabbMax(b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT));
triangles->InternalProcessAllTriangles(&callback,aabbMin,aabbMax);
//now we have an array of leafnodes in m_leafNodes
numLeafNodes = m_leafNodes.size();
m_contiguousNodes.resize(2*numLeafNodes);
}
m_curNodeIndex = 0;
buildTree(0,numLeafNodes);
///if the entire tree is smaller than the subtree size, we need to create header info for the tree
if(m_useQuantization && !m_SubtreeHeaders.size())
{
b3BvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
subtree.m_rootNodeIndex = 0;
subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
}
//PCK: update the copy of the size
m_subtreeHeaderCount = m_SubtreeHeaders.size();
//PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
m_quantizedLeafNodes.clear();
m_leafNodes.clear();
}
void b3OptimizedBvh::refit(b3StridingMeshInterface* meshInterface,const b3Vector3& aabbMin,const b3Vector3& aabbMax)
{
if (m_useQuantization)
{
setQuantizationValues(aabbMin,aabbMax);
updateBvhNodes(meshInterface,0,m_curNodeIndex,0);
///now update all subtree headers
int i;
for (i=0;i<m_SubtreeHeaders.size();i++)
{
b3BvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[subtree.m_rootNodeIndex]);
}
} else
{
}
}
void b3OptimizedBvh::refitPartial(b3StridingMeshInterface* meshInterface,const b3Vector3& aabbMin,const b3Vector3& aabbMax)
{
//incrementally initialize quantization values
b3Assert(m_useQuantization);
b3Assert(aabbMin.getX() > m_bvhAabbMin.getX());
b3Assert(aabbMin.getY() > m_bvhAabbMin.getY());
b3Assert(aabbMin.getZ() > m_bvhAabbMin.getZ());
b3Assert(aabbMax.getX() < m_bvhAabbMax.getX());
b3Assert(aabbMax.getY() < m_bvhAabbMax.getY());
b3Assert(aabbMax.getZ() < m_bvhAabbMax.getZ());
///we should update all quantization values, using updateBvhNodes(meshInterface);
///but we only update chunks that overlap the given aabb
unsigned short quantizedQueryAabbMin[3];
unsigned short quantizedQueryAabbMax[3];
quantize(&quantizedQueryAabbMin[0],aabbMin,0);
quantize(&quantizedQueryAabbMax[0],aabbMax,1);
int i;
for (i=0;i<this->m_SubtreeHeaders.size();i++)
{
b3BvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
//PCK: unsigned instead of bool
unsigned overlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
if (overlap != 0)
{
updateBvhNodes(meshInterface,subtree.m_rootNodeIndex,subtree.m_rootNodeIndex+subtree.m_subtreeSize,i);
subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[subtree.m_rootNodeIndex]);
}
}
}
void b3OptimizedBvh::updateBvhNodes(b3StridingMeshInterface* meshInterface,int firstNode,int endNode,int index)
{
(void)index;
b3Assert(m_useQuantization);
int curNodeSubPart=-1;
//get access info to trianglemesh data
const unsigned char *vertexbase = 0;
int numverts = 0;
PHY_ScalarType type = PHY_INTEGER;
int stride = 0;
const unsigned char *indexbase = 0;
int indexstride = 0;
int numfaces = 0;
PHY_ScalarType indicestype = PHY_INTEGER;
b3Vector3 triangleVerts[3];
b3Vector3 aabbMin,aabbMax;
const b3Vector3& meshScaling = meshInterface->getScaling();
int i;
for (i=endNode-1;i>=firstNode;i--)
{
b3QuantizedBvhNode& curNode = m_quantizedContiguousNodes[i];
if (curNode.isLeafNode())
{
//recalc aabb from triangle data
int nodeSubPart = curNode.getPartId();
int nodeTriangleIndex = curNode.getTriangleIndex();
if (nodeSubPart != curNodeSubPart)
{
if (curNodeSubPart >= 0)
meshInterface->unLockReadOnlyVertexBase(curNodeSubPart);
meshInterface->getLockedReadOnlyVertexIndexBase(&vertexbase,numverts, type,stride,&indexbase,indexstride,numfaces,indicestype,nodeSubPart);
curNodeSubPart = nodeSubPart;
b3Assert(indicestype==PHY_INTEGER||indicestype==PHY_SHORT);
}
//triangles->getLockedReadOnlyVertexIndexBase(vertexBase,numVerts,
unsigned int* gfxbase = (unsigned int*)(indexbase+nodeTriangleIndex*indexstride);
for (int j=2;j>=0;j--)
{
int graphicsindex = indicestype==PHY_SHORT?((unsigned short*)gfxbase)[j]:gfxbase[j];
if (type == PHY_FLOAT)
{
float* graphicsbase = (float*)(vertexbase+graphicsindex*stride);
triangleVerts[j] = b3Vector3(
graphicsbase[0]*meshScaling.getX(),
graphicsbase[1]*meshScaling.getY(),
graphicsbase[2]*meshScaling.getZ());
}
else
{
double* graphicsbase = (double*)(vertexbase+graphicsindex*stride);
triangleVerts[j] = b3Vector3( b3Scalar(graphicsbase[0]*meshScaling.getX()), b3Scalar(graphicsbase[1]*meshScaling.getY()), b3Scalar(graphicsbase[2]*meshScaling.getZ()));
}
}
aabbMin.setValue(b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT));
aabbMax.setValue(b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT));
aabbMin.setMin(triangleVerts[0]);
aabbMax.setMax(triangleVerts[0]);
aabbMin.setMin(triangleVerts[1]);
aabbMax.setMax(triangleVerts[1]);
aabbMin.setMin(triangleVerts[2]);
aabbMax.setMax(triangleVerts[2]);
quantize(&curNode.m_quantizedAabbMin[0],aabbMin,0);
quantize(&curNode.m_quantizedAabbMax[0],aabbMax,1);
} else
{
//combine aabb from both children
b3QuantizedBvhNode* leftChildNode = &m_quantizedContiguousNodes[i+1];
b3QuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? &m_quantizedContiguousNodes[i+2] :
&m_quantizedContiguousNodes[i+1+leftChildNode->getEscapeIndex()];
{
for (int i=0;i<3;i++)
{
curNode.m_quantizedAabbMin[i] = leftChildNode->m_quantizedAabbMin[i];
if (curNode.m_quantizedAabbMin[i]>rightChildNode->m_quantizedAabbMin[i])
curNode.m_quantizedAabbMin[i]=rightChildNode->m_quantizedAabbMin[i];
curNode.m_quantizedAabbMax[i] = leftChildNode->m_quantizedAabbMax[i];
if (curNode.m_quantizedAabbMax[i] < rightChildNode->m_quantizedAabbMax[i])
curNode.m_quantizedAabbMax[i] = rightChildNode->m_quantizedAabbMax[i];
}
}
}
}
if (curNodeSubPart >= 0)
meshInterface->unLockReadOnlyVertexBase(curNodeSubPart);
}
///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
b3OptimizedBvh* b3OptimizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{
b3QuantizedBvh* bvh = b3QuantizedBvh::deSerializeInPlace(i_alignedDataBuffer,i_dataBufferSize,i_swapEndian);
//we don't add additional data so just do a static upcast
return static_cast<b3OptimizedBvh*>(bvh);
}


@@ -0,0 +1,65 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
///Contains contributions from Disney Studios
#ifndef B3_OPTIMIZED_BVH_H
#define B3_OPTIMIZED_BVH_H
#include "b3QuantizedBvh.h"
class b3StridingMeshInterface;
///The b3OptimizedBvh extends the b3QuantizedBvh to create an AABB tree for triangle meshes, through the b3StridingMeshInterface.
B3_ATTRIBUTE_ALIGNED16(class) b3OptimizedBvh : public b3QuantizedBvh
{
public:
B3_DECLARE_ALIGNED_ALLOCATOR();
protected:
public:
b3OptimizedBvh();
virtual ~b3OptimizedBvh();
void build(b3StridingMeshInterface* triangles,bool useQuantizedAabbCompression, const b3Vector3& bvhAabbMin, const b3Vector3& bvhAabbMax);
void refit(b3StridingMeshInterface* triangles,const b3Vector3& aabbMin,const b3Vector3& aabbMax);
void refitPartial(b3StridingMeshInterface* triangles,const b3Vector3& aabbMin, const b3Vector3& aabbMax);
void updateBvhNodes(b3StridingMeshInterface* meshInterface,int firstNode,int endNode,int index);
/// Data buffer MUST be 16 byte aligned
virtual bool serializeInPlace(void *o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const
{
return b3QuantizedBvh::serialize(o_alignedDataBuffer,i_dataBufferSize,i_swapEndian);
}
///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
static b3OptimizedBvh *deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);
};
#endif //B3_OPTIMIZED_BVH_H
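A hedged sketch of the intended call pattern, assuming 'mesh' points to some concrete b3StridingMeshInterface implementation that already holds the triangle data:

b3Vector3 aabbMin(-100, -100, -100);
b3Vector3 aabbMax(100, 100, 100);
b3OptimizedBvh* bvh = new b3OptimizedBvh();
bvh->build(mesh, true /*useQuantizedAabbCompression*/, aabbMin, aabbMax);
// after deforming the mesh within the same bounds, refit the existing tree instead of rebuilding it
bvh->refit(mesh, aabbMin, aabbMax);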

File diff suppressed because it is too large.


@@ -0,0 +1,582 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef B3_QUANTIZED_BVH_H
#define B3_QUANTIZED_BVH_H
class b3Serializer;
//#define DEBUG_CHECK_DEQUANTIZATION 1
#ifdef DEBUG_CHECK_DEQUANTIZATION
#ifdef __SPU__
#define printf spu_printf
#endif //__SPU__
#include <stdio.h>
#include <stdlib.h>
#endif //DEBUG_CHECK_DEQUANTIZATION
#include "Bullet3Common/b3Vector3.h"
#include "Bullet3Common/b3AlignedAllocator.h"
#ifdef B3_USE_DOUBLE_PRECISION
#define b3QuantizedBvhData b3QuantizedBvhDoubleData
#define b3OptimizedBvhNodeData b3OptimizedBvhNodeDoubleData
#define b3QuantizedBvhDataName "b3QuantizedBvhDoubleData"
#else
#define b3QuantizedBvhData b3QuantizedBvhFloatData
#define b3OptimizedBvhNodeData b3OptimizedBvhNodeFloatData
#define b3QuantizedBvhDataName "b3QuantizedBvhFloatData"
#endif
//http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrf__m128.asp
//Note: currently we have 16 bytes per quantized node
#define MAX_SUBTREE_SIZE_IN_BYTES 2048
// 10 gives the potential for 1024 parts, with at most 2^21 (2097152, minus one actually) triangles each
// (since the sign bit is reserved for the escape index)
#define MAX_NUM_PARTS_IN_BITS 10
///b3QuantizedBvhNode is a compressed aabb node, 16 bytes.
///A node can be used as a leaf node or an internal node. Leaf nodes can point to a 32-bit triangle index (non-negative range).
B3_ATTRIBUTE_ALIGNED16 (struct) b3QuantizedBvhNode
{
B3_DECLARE_ALIGNED_ALLOCATOR();
//12 bytes
unsigned short int m_quantizedAabbMin[3];
unsigned short int m_quantizedAabbMax[3];
//4 bytes
int m_escapeIndexOrTriangleIndex;
bool isLeafNode() const
{
//skipindex is negative (internal node), triangleindex >=0 (leafnode)
return (m_escapeIndexOrTriangleIndex >= 0);
}
int getEscapeIndex() const
{
b3Assert(!isLeafNode());
return -m_escapeIndexOrTriangleIndex;
}
int getTriangleIndex() const
{
b3Assert(isLeafNode());
unsigned int x=0;
unsigned int y = (~(x&0))<<(31-MAX_NUM_PARTS_IN_BITS);
// Get only the lower bits where the triangle index is stored
return (m_escapeIndexOrTriangleIndex&~(y));
}
int getPartId() const
{
b3Assert(isLeafNode());
// Get only the highest bits where the part index is stored
return (m_escapeIndexOrTriangleIndex>>(31-MAX_NUM_PARTS_IN_BITS));
}
}
;
/// b3OptimizedBvhNode contains both internal and leaf node information.
/// Total node size is 44 bytes of data, padded to 64 bytes per node. You can use the compressed 16-byte version instead.
B3_ATTRIBUTE_ALIGNED16 (struct) b3OptimizedBvhNode
{
B3_DECLARE_ALIGNED_ALLOCATOR();
//32 bytes
b3Vector3 m_aabbMinOrg;
b3Vector3 m_aabbMaxOrg;
//4
int m_escapeIndex;
//8
//for child nodes
int m_subPart;
int m_triangleIndex;
//pad the size to 64 bytes
char m_padding[20];
};
///b3BvhSubtreeInfo provides info to gather a subtree of limited size
B3_ATTRIBUTE_ALIGNED16(class) b3BvhSubtreeInfo
{
public:
B3_DECLARE_ALIGNED_ALLOCATOR();
//12 bytes
unsigned short int m_quantizedAabbMin[3];
unsigned short int m_quantizedAabbMax[3];
//4 bytes, points to the root of the subtree
int m_rootNodeIndex;
//4 bytes
int m_subtreeSize;
int m_padding[3];
b3BvhSubtreeInfo()
{
//memset(&m_padding[0], 0, sizeof(m_padding));
}
void setAabbFromQuantizeNode(const b3QuantizedBvhNode& quantizedNode)
{
m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0];
m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1];
m_quantizedAabbMin[2] = quantizedNode.m_quantizedAabbMin[2];
m_quantizedAabbMax[0] = quantizedNode.m_quantizedAabbMax[0];
m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1];
m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2];
}
}
;
class b3NodeOverlapCallback
{
public:
virtual ~b3NodeOverlapCallback() {};
virtual void processNode(int subPart, int triangleIndex) = 0;
};
#include "Bullet3Common/b3AlignedAllocator.h"
#include "Bullet3Common/b3AlignedObjectArray.h"
///for code readability:
typedef b3AlignedObjectArray<b3OptimizedBvhNode> NodeArray;
typedef b3AlignedObjectArray<b3QuantizedBvhNode> QuantizedNodeArray;
typedef b3AlignedObjectArray<b3BvhSubtreeInfo> BvhSubtreeInfoArray;
///The b3QuantizedBvh class stores an AABB tree that can be quickly traversed on CPU and Cell SPU.
///It is used by the b3BvhTriangleMeshShape as a midphase, and by the b3MultiSapBroadphase.
///It is recommended to use quantization for better performance and lower memory requirements.
B3_ATTRIBUTE_ALIGNED16(class) b3QuantizedBvh
{
public:
enum b3TraversalMode
{
TRAVERSAL_STACKLESS = 0,
TRAVERSAL_STACKLESS_CACHE_FRIENDLY,
TRAVERSAL_RECURSIVE
};
b3Vector3 m_bvhAabbMin;
b3Vector3 m_bvhAabbMax;
b3Vector3 m_bvhQuantization;
protected:
int m_bulletVersion; //for serialization versioning. It could also be used to detect endianness.
int m_curNodeIndex;
//quantization data
bool m_useQuantization;
NodeArray m_leafNodes;
NodeArray m_contiguousNodes;
QuantizedNodeArray m_quantizedLeafNodes;
QuantizedNodeArray m_quantizedContiguousNodes;
b3TraversalMode m_traversalMode;
BvhSubtreeInfoArray m_SubtreeHeaders;
//This is only used for serialization so we don't have to add serialization directly to b3AlignedObjectArray
mutable int m_subtreeHeaderCount;
///two versions, one for quantized and normal nodes. This allows code-reuse while maintaining readability (no template/macro!)
///this might be refactored into a virtual; it is usually not calculated at run-time
void setInternalNodeAabbMin(int nodeIndex, const b3Vector3& aabbMin)
{
if (m_useQuantization)
{
quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] ,aabbMin,0);
} else
{
m_contiguousNodes[nodeIndex].m_aabbMinOrg = aabbMin;
}
}
void setInternalNodeAabbMax(int nodeIndex,const b3Vector3& aabbMax)
{
if (m_useQuantization)
{
quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0],aabbMax,1);
} else
{
m_contiguousNodes[nodeIndex].m_aabbMaxOrg = aabbMax;
}
}
b3Vector3 getAabbMin(int nodeIndex) const
{
if (m_useQuantization)
{
return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMin[0]);
}
//non-quantized
return m_leafNodes[nodeIndex].m_aabbMinOrg;
}
b3Vector3 getAabbMax(int nodeIndex) const
{
if (m_useQuantization)
{
return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMax[0]);
}
//non-quantized
return m_leafNodes[nodeIndex].m_aabbMaxOrg;
}
void setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex)
{
if (m_useQuantization)
{
m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = -escapeIndex;
}
else
{
m_contiguousNodes[nodeIndex].m_escapeIndex = escapeIndex;
}
}
void mergeInternalNodeAabb(int nodeIndex,const b3Vector3& newAabbMin,const b3Vector3& newAabbMax)
{
if (m_useQuantization)
{
unsigned short int quantizedAabbMin[3];
unsigned short int quantizedAabbMax[3];
quantize(quantizedAabbMin,newAabbMin,0);
quantize(quantizedAabbMax,newAabbMax,1);
for (int i=0;i<3;i++)
{
if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] > quantizedAabbMin[i])
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] = quantizedAabbMin[i];
if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] < quantizedAabbMax[i])
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] = quantizedAabbMax[i];
}
} else
{
//non-quantized
m_contiguousNodes[nodeIndex].m_aabbMinOrg.setMin(newAabbMin);
m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax);
}
}
void swapLeafNodes(int firstIndex,int secondIndex);
void assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex);
protected:
void buildTree (int startIndex,int endIndex);
int calcSplittingAxis(int startIndex,int endIndex);
int sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis);
void walkStacklessTree(b3NodeOverlapCallback* nodeCallback,const b3Vector3& aabbMin,const b3Vector3& aabbMax) const;
void walkStacklessQuantizedTreeAgainstRay(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax, int startNodeIndex,int endNodeIndex) const;
void walkStacklessQuantizedTree(b3NodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const;
void walkStacklessTreeAgainstRay(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax, int startNodeIndex,int endNodeIndex) const;
///tree traversal designed for small-memory processors like PS3 SPU
void walkStacklessQuantizedTreeCacheFriendly(b3NodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;
///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
void walkRecursiveQuantizedTreeAgainstQueryAabb(const b3QuantizedBvhNode* currentNode,b3NodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;
///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
void walkRecursiveQuantizedTreeAgainstQuantizedTree(const b3QuantizedBvhNode* treeNodeA,const b3QuantizedBvhNode* treeNodeB,b3NodeOverlapCallback* nodeCallback) const;
void updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex);
public:
B3_DECLARE_ALIGNED_ALLOCATOR();
b3QuantizedBvh();
virtual ~b3QuantizedBvh();
///***************************************** expert/internal use only *************************
void setQuantizationValues(const b3Vector3& bvhAabbMin,const b3Vector3& bvhAabbMax,b3Scalar quantizationMargin=b3Scalar(1.0));
QuantizedNodeArray& getLeafNodeArray() { return m_quantizedLeafNodes; }
///buildInternal is expert use only: assumes that setQuantizationValues and LeafNodeArray are initialized
void buildInternal();
///***************************************** expert/internal use only *************************
void reportAabbOverlappingNodex(b3NodeOverlapCallback* nodeCallback,const b3Vector3& aabbMin,const b3Vector3& aabbMax) const;
void reportRayOverlappingNodex (b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget) const;
void reportBoxCastOverlappingNodex(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin,const b3Vector3& aabbMax) const;
B3_FORCE_INLINE void quantize(unsigned short* out, const b3Vector3& point,int isMax) const
{
b3Assert(m_useQuantization);
b3Assert(point.getX() <= m_bvhAabbMax.getX());
b3Assert(point.getY() <= m_bvhAabbMax.getY());
b3Assert(point.getZ() <= m_bvhAabbMax.getZ());
b3Assert(point.getX() >= m_bvhAabbMin.getX());
b3Assert(point.getY() >= m_bvhAabbMin.getY());
b3Assert(point.getZ() >= m_bvhAabbMin.getZ());
b3Vector3 v = (point - m_bvhAabbMin) * m_bvhQuantization;
///Make sure rounding is done in a way that unQuantize(quantizeWithClamp(...)) is conservative
///end-points always set the first bit, so that they are sorted properly (so that neighbouring AABBs overlap properly)
///@todo: double-check this
if (isMax)
{
out[0] = (unsigned short) (((unsigned short)(v.getX()+b3Scalar(1.)) | 1));
out[1] = (unsigned short) (((unsigned short)(v.getY()+b3Scalar(1.)) | 1));
out[2] = (unsigned short) (((unsigned short)(v.getZ()+b3Scalar(1.)) | 1));
} else
{
out[0] = (unsigned short) (((unsigned short)(v.getX()) & 0xfffe));
out[1] = (unsigned short) (((unsigned short)(v.getY()) & 0xfffe));
out[2] = (unsigned short) (((unsigned short)(v.getZ()) & 0xfffe));
}
#ifdef DEBUG_CHECK_DEQUANTIZATION
b3Vector3 newPoint = unQuantize(out);
if (isMax)
{
if (newPoint.getX() < point.getX())
{
printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
}
if (newPoint.getY() < point.getY())
{
printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
}
if (newPoint.getZ() < point.getZ())
{
printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
}
} else
{
if (newPoint.getX() > point.getX())
{
printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
}
if (newPoint.getY() > point.getY())
{
printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
}
if (newPoint.getZ() > point.getZ())
{
printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
}
}
#endif //DEBUG_CHECK_DEQUANTIZATION
}
B3_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const b3Vector3& point2,int isMax) const
{
b3Assert(m_useQuantization);
b3Vector3 clampedPoint(point2);
clampedPoint.setMax(m_bvhAabbMin);
clampedPoint.setMin(m_bvhAabbMax);
quantize(out,clampedPoint,isMax);
}
B3_FORCE_INLINE b3Vector3 unQuantize(const unsigned short* vecIn) const
{
b3Vector3 vecOut;
vecOut.setValue(
(b3Scalar)(vecIn[0]) / (m_bvhQuantization.getX()),
(b3Scalar)(vecIn[1]) / (m_bvhQuantization.getY()),
(b3Scalar)(vecIn[2]) / (m_bvhQuantization.getZ()));
vecOut += m_bvhAabbMin;
return vecOut;
}
///setTraversalMode lets you choose between stackless, recursive or stackless cache friendly tree traversal. Note this is only implemented for quantized trees.
void setTraversalMode(b3TraversalMode traversalMode)
{
m_traversalMode = traversalMode;
}
B3_FORCE_INLINE QuantizedNodeArray& getQuantizedNodeArray()
{
return m_quantizedContiguousNodes;
}
B3_FORCE_INLINE BvhSubtreeInfoArray& getSubtreeInfoArray()
{
return m_SubtreeHeaders;
}
////////////////////////////////////////////////////////////////////
/////Calculate space needed to store BVH for serialization
unsigned calculateSerializeBufferSize() const;
/// Data buffer MUST be 16 byte aligned
virtual bool serialize(void *o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const;
///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
static b3QuantizedBvh *deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);
static unsigned int getAlignmentSerializationPadding();
//////////////////////////////////////////////////////////////////////
virtual int calculateSerializeBufferSizeNew() const;
///fills the dataBuffer and returns the struct name (and 0 on failure)
virtual const char* serialize(void* dataBuffer, b3Serializer* serializer) const;
virtual void deSerializeFloat(struct b3QuantizedBvhFloatData& quantizedBvhFloatData);
virtual void deSerializeDouble(struct b3QuantizedBvhDoubleData& quantizedBvhDoubleData);
////////////////////////////////////////////////////////////////////
B3_FORCE_INLINE bool isQuantized()
{
return m_useQuantization;
}
private:
// Special "copy" constructor that allows for in-place deserialization
// Prevents b3Vector3's default constructor from being called, but doesn't initialize much else
// ownsMemory should most likely be false when deserializing; if you are not deserializing, don't call this (the extra parameter also changes the function signature, which we need)
b3QuantizedBvh(b3QuantizedBvh &other, bool ownsMemory);
}
;
struct b3BvhSubtreeInfoData
{
int m_rootNodeIndex;
int m_subtreeSize;
unsigned short m_quantizedAabbMin[3];
unsigned short m_quantizedAabbMax[3];
};
struct b3OptimizedBvhNodeFloatData
{
b3Vector3FloatData m_aabbMinOrg;
b3Vector3FloatData m_aabbMaxOrg;
int m_escapeIndex;
int m_subPart;
int m_triangleIndex;
char m_pad[4];
};
struct b3OptimizedBvhNodeDoubleData
{
b3Vector3DoubleData m_aabbMinOrg;
b3Vector3DoubleData m_aabbMaxOrg;
int m_escapeIndex;
int m_subPart;
int m_triangleIndex;
char m_pad[4];
};
struct b3QuantizedBvhNodeData
{
unsigned short m_quantizedAabbMin[3];
unsigned short m_quantizedAabbMax[3];
int m_escapeIndexOrTriangleIndex;
};
struct b3QuantizedBvhFloatData
{
b3Vector3FloatData m_bvhAabbMin;
b3Vector3FloatData m_bvhAabbMax;
b3Vector3FloatData m_bvhQuantization;
int m_curNodeIndex;
int m_useQuantization;
int m_numContiguousLeafNodes;
int m_numQuantizedContiguousNodes;
b3OptimizedBvhNodeFloatData *m_contiguousNodesPtr;
b3QuantizedBvhNodeData *m_quantizedContiguousNodesPtr;
b3BvhSubtreeInfoData *m_subTreeInfoPtr;
int m_traversalMode;
int m_numSubtreeHeaders;
};
struct b3QuantizedBvhDoubleData
{
b3Vector3DoubleData m_bvhAabbMin;
b3Vector3DoubleData m_bvhAabbMax;
b3Vector3DoubleData m_bvhQuantization;
int m_curNodeIndex;
int m_useQuantization;
int m_numContiguousLeafNodes;
int m_numQuantizedContiguousNodes;
b3OptimizedBvhNodeDoubleData *m_contiguousNodesPtr;
b3QuantizedBvhNodeData *m_quantizedContiguousNodesPtr;
int m_traversalMode;
int m_numSubtreeHeaders;
b3BvhSubtreeInfoData *m_subTreeInfoPtr;
};
B3_FORCE_INLINE int b3QuantizedBvh::calculateSerializeBufferSizeNew() const
{
return sizeof(b3QuantizedBvhData);
}
#endif //B3_QUANTIZED_BVH_H
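To make the conservative rounding in quantize() concrete, a hedged round-trip sketch; it assumes setQuantizationValues() enables quantization on a fresh tree, which is not visible in this header:

b3QuantizedBvh bvh;
bvh.setQuantizationValues(b3Vector3(-10, -10, -10), b3Vector3(10, 10, 10));

unsigned short qMin[3], qMax[3];
b3Vector3 p(1.25f, -3.5f, 7.0f);
bvh.quantizeWithClamp(qMin, p, 0); // rounds down and clears the low bit
bvh.quantizeWithClamp(qMax, p, 1); // rounds up and sets the low bit
b3Vector3 lo = bvh.unQuantize(qMin);
b3Vector3 hi = bvh.unQuantize(qMax);
// lo <= p <= hi holds component-wise: the quantized box always contains the original point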


@@ -0,0 +1,214 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "b3StridingMeshInterface.h"
b3StridingMeshInterface::~b3StridingMeshInterface()
{
}
void b3StridingMeshInterface::InternalProcessAllTriangles(b3InternalTriangleIndexCallback* callback,const b3Vector3& aabbMin,const b3Vector3& aabbMax) const
{
(void)aabbMin;
(void)aabbMax;
int numtotalphysicsverts = 0;
int part,graphicssubparts = getNumSubParts();
const unsigned char * vertexbase;
const unsigned char * indexbase;
int indexstride;
PHY_ScalarType type;
PHY_ScalarType gfxindextype;
int stride,numverts,numtriangles;
int gfxindex;
b3Vector3 triangle[3];
b3Vector3 meshScaling = getScaling();
///if the number of parts is large, performance might drop due to the inner-loop switch on index type
for (part=0;part<graphicssubparts ;part++)
{
getLockedReadOnlyVertexIndexBase(&vertexbase,numverts,type,stride,&indexbase,indexstride,numtriangles,gfxindextype,part);
numtotalphysicsverts+=numtriangles*3; //upper bound
///it is unlikely that developers want to pass double-precision meshes into a single-precision Bullet build,
///so disable this feature by default
///see patch http://code.google.com/p/bullet/issues/detail?id=213
switch (type)
{
case PHY_FLOAT:
{
float* graphicsbase;
switch (gfxindextype)
{
case PHY_INTEGER:
{
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
{
unsigned int* tri_indices= (unsigned int*)(indexbase+gfxindex*indexstride);
graphicsbase = (float*)(vertexbase+tri_indices[0]*stride);
triangle[0].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(),graphicsbase[2]*meshScaling.getZ());
graphicsbase = (float*)(vertexbase+tri_indices[1]*stride);
triangle[1].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
graphicsbase = (float*)(vertexbase+tri_indices[2]*stride);
triangle[2].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
}
break;
}
case PHY_SHORT:
{
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
{
unsigned short int* tri_indices= (unsigned short int*)(indexbase+gfxindex*indexstride);
graphicsbase = (float*)(vertexbase+tri_indices[0]*stride);
triangle[0].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(),graphicsbase[2]*meshScaling.getZ());
graphicsbase = (float*)(vertexbase+tri_indices[1]*stride);
triangle[1].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
graphicsbase = (float*)(vertexbase+tri_indices[2]*stride);
triangle[2].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
}
break;
}
case PHY_UCHAR:
{
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
{
unsigned char* tri_indices= (unsigned char*)(indexbase+gfxindex*indexstride);
graphicsbase = (float*)(vertexbase+tri_indices[0]*stride);
triangle[0].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(),graphicsbase[2]*meshScaling.getZ());
graphicsbase = (float*)(vertexbase+tri_indices[1]*stride);
triangle[1].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
graphicsbase = (float*)(vertexbase+tri_indices[2]*stride);
triangle[2].setValue(graphicsbase[0]*meshScaling.getX(),graphicsbase[1]*meshScaling.getY(), graphicsbase[2]*meshScaling.getZ());
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
}
break;
}
default:
b3Assert((gfxindextype == PHY_INTEGER) || (gfxindextype == PHY_SHORT));
}
break;
}
case PHY_DOUBLE:
{
double* graphicsbase;
switch (gfxindextype)
{
case PHY_INTEGER:
{
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
{
unsigned int* tri_indices= (unsigned int*)(indexbase+gfxindex*indexstride);
graphicsbase = (double*)(vertexbase+tri_indices[0]*stride);
triangle[0].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(),(b3Scalar)graphicsbase[2]*meshScaling.getZ());
graphicsbase = (double*)(vertexbase+tri_indices[1]*stride);
triangle[1].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(), (b3Scalar)graphicsbase[2]*meshScaling.getZ());
graphicsbase = (double*)(vertexbase+tri_indices[2]*stride);
triangle[2].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(), (b3Scalar)graphicsbase[2]*meshScaling.getZ());
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
}
break;
}
case PHY_SHORT:
{
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
{
unsigned short int* tri_indices= (unsigned short int*)(indexbase+gfxindex*indexstride);
graphicsbase = (double*)(vertexbase+tri_indices[0]*stride);
triangle[0].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(),(b3Scalar)graphicsbase[2]*meshScaling.getZ());
graphicsbase = (double*)(vertexbase+tri_indices[1]*stride);
triangle[1].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(), (b3Scalar)graphicsbase[2]*meshScaling.getZ());
graphicsbase = (double*)(vertexbase+tri_indices[2]*stride);
triangle[2].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(), (b3Scalar)graphicsbase[2]*meshScaling.getZ());
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
}
break;
}
case PHY_UCHAR:
{
for (gfxindex=0;gfxindex<numtriangles;gfxindex++)
{
unsigned char* tri_indices= (unsigned char*)(indexbase+gfxindex*indexstride);
graphicsbase = (double*)(vertexbase+tri_indices[0]*stride);
triangle[0].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(),(b3Scalar)graphicsbase[2]*meshScaling.getZ());
graphicsbase = (double*)(vertexbase+tri_indices[1]*stride);
triangle[1].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(), (b3Scalar)graphicsbase[2]*meshScaling.getZ());
graphicsbase = (double*)(vertexbase+tri_indices[2]*stride);
triangle[2].setValue((b3Scalar)graphicsbase[0]*meshScaling.getX(),(b3Scalar)graphicsbase[1]*meshScaling.getY(), (b3Scalar)graphicsbase[2]*meshScaling.getZ());
callback->internalProcessTriangleIndex(triangle,part,gfxindex);
}
break;
}
default:
b3Assert((gfxindextype == PHY_INTEGER) || (gfxindextype == PHY_SHORT) || (gfxindextype == PHY_UCHAR));
}
break;
}
default:
b3Assert((type == PHY_FLOAT) || (type == PHY_DOUBLE));
}
unLockReadOnlyVertexBase(part);
}
}
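//The nested switches above repeat the same per-triangle fetch for every combination of
//vertex scalar type and index type. The templated helper below is only an illustrative
//sketch of how that fetch could be factored; fetchTriangles, VertexType and IndexType are
//hypothetical names, not part of the Bullet3 API, and the sketch assumes the strides
//reported by getLockedReadOnlyVertexIndexBase.
template <typename VertexType, typename IndexType>
static void fetchTriangles(b3InternalTriangleIndexCallback* callback,
	const unsigned char* vertexbase, int stride,
	const unsigned char* indexbase, int indexstride,
	int numtriangles, const b3Vector3& meshScaling, int part)
{
	b3Vector3 triangle[3];
	for (int gfxindex = 0; gfxindex < numtriangles; gfxindex++)
	{
		const IndexType* tri_indices = (const IndexType*)(indexbase + gfxindex * indexstride);
		for (int v = 0; v < 3; v++)
		{
			const VertexType* graphicsbase = (const VertexType*)(vertexbase + tri_indices[v] * stride);
			triangle[v].setValue((b3Scalar)graphicsbase[0] * meshScaling.getX(),
				(b3Scalar)graphicsbase[1] * meshScaling.getY(),
				(b3Scalar)graphicsbase[2] * meshScaling.getZ());
		}
		callback->internalProcessTriangleIndex(triangle, part, gfxindex);
	}
}
//e.g. the PHY_FLOAT/PHY_SHORT branch would then read:
//fetchTriangles<float, unsigned short int>(callback, vertexbase, stride, indexbase, indexstride, numtriangles, meshScaling, part);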
void b3StridingMeshInterface::calculateAabbBruteForce(b3Vector3& aabbMin,b3Vector3& aabbMax)
{
struct AabbCalculationCallback : public b3InternalTriangleIndexCallback
{
b3Vector3 m_aabbMin;
b3Vector3 m_aabbMax;
AabbCalculationCallback()
{
m_aabbMin.setValue(b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT));
m_aabbMax.setValue(b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT));
}
virtual void internalProcessTriangleIndex(b3Vector3* triangle,int partId,int triangleIndex)
{
(void)partId;
(void)triangleIndex;
m_aabbMin.setMin(triangle[0]);
m_aabbMax.setMax(triangle[0]);
m_aabbMin.setMin(triangle[1]);
m_aabbMax.setMax(triangle[1]);
m_aabbMin.setMin(triangle[2]);
m_aabbMax.setMax(triangle[2]);
}
};
//first calculate the total aabb for all triangles
AabbCalculationCallback aabbCallback;
aabbMin.setValue(b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT));
aabbMax.setValue(b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT));
InternalProcessAllTriangles(&aabbCallback,aabbMin,aabbMax);
aabbMin = aabbCallback.m_aabbMin;
aabbMax = aabbCallback.m_aabbMax;
}
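//calculateAabbBruteForce above shows the b3InternalTriangleIndexCallback pattern: derive a
//callback, hand it to InternalProcessAllTriangles together with a query AABB, and accumulate
//per-triangle results. The TriangleCounter below is a hypothetical illustration of the same
//pattern; it is not part of the library.
struct TriangleCounter : public b3InternalTriangleIndexCallback
{
	int m_count;
	TriangleCounter() : m_count(0) {}
	virtual void internalProcessTriangleIndex(b3Vector3* triangle, int partId, int triangleIndex)
	{
		(void)triangle;
		(void)partId;
		(void)triangleIndex;
		m_count++;
	}
};
//usage sketch: pass an all-encompassing query AABB so every triangle is visited
//TriangleCounter counter;
//b3Vector3 aabbMin, aabbMax;
//aabbMin.setValue(b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT),b3Scalar(-B3_LARGE_FLOAT));
//aabbMax.setValue(b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT),b3Scalar(B3_LARGE_FLOAT));
//meshInterface->InternalProcessAllTriangles(&counter, aabbMin, aabbMax);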

View File

@@ -0,0 +1,167 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef B3_STRIDING_MESHINTERFACE_H
#define B3_STRIDING_MESHINTERFACE_H
#include "Bullet3Common/b3Vector3.h"
#include "b3TriangleCallback.h"
//#include "b3ConcaveShape.h"
enum PHY_ScalarType {
PHY_FLOAT, PHY_DOUBLE, PHY_INTEGER, PHY_SHORT,
PHY_FIXEDPOINT88, PHY_UCHAR
};
/// The b3StridingMeshInterface is the interface class for high-performance generic access to triangle meshes, used in combination with b3BvhTriangleMeshShape and some other collision shapes.
/// Using an index striding of 3*sizeof(integer) it can use triangle arrays; using an index striding of 1*sizeof(integer) it can handle triangle strips.
/// It allows sharing of graphics and collision meshes, and it provides locking/unlocking of graphics meshes that are in GPU memory.
B3_ATTRIBUTE_ALIGNED16(class ) b3StridingMeshInterface
{
protected:
b3Vector3 m_scaling;
public:
B3_DECLARE_ALIGNED_ALLOCATOR();
b3StridingMeshInterface() :m_scaling(b3Scalar(1.),b3Scalar(1.),b3Scalar(1.))
{
}
virtual ~b3StridingMeshInterface();
virtual void InternalProcessAllTriangles(b3InternalTriangleIndexCallback* callback,const b3Vector3& aabbMin,const b3Vector3& aabbMax) const;
///Brute-force method to calculate the AABB
void calculateAabbBruteForce(b3Vector3& aabbMin,b3Vector3& aabbMax);
/// get read and write access to a subpart of a triangle mesh
/// this subpart has a continuous array of vertices and indices
/// in this way the mesh can be handled as chunks of memory with striding
/// very similar to OpenGL vertexarray support
/// make a call to unLockVertexBase when the read and write access is finished
virtual void getLockedVertexIndexBase(unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& stride,unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0)=0;
virtual void getLockedReadOnlyVertexIndexBase(const unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& stride,const unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0) const=0;
/// unLockVertexBase finishes the access to a subpart of the triangle mesh
/// make a call to unLockVertexBase when the read and write access (using getLockedVertexIndexBase) is finished
virtual void unLockVertexBase(int subpart)=0;
virtual void unLockReadOnlyVertexBase(int subpart) const=0;
/// getNumSubParts returns the number of separate subparts
/// each subpart has a continuous array of vertices and indices
virtual int getNumSubParts() const=0;
virtual void preallocateVertices(int numverts)=0;
virtual void preallocateIndices(int numindices)=0;
virtual bool hasPremadeAabb() const { return false; }
virtual void setPremadeAabb(const b3Vector3& aabbMin, const b3Vector3& aabbMax ) const
{
(void) aabbMin;
(void) aabbMax;
}
virtual void getPremadeAabb(b3Vector3* aabbMin, b3Vector3* aabbMax ) const
{
(void) aabbMin;
(void) aabbMax;
}
const b3Vector3& getScaling() const {
return m_scaling;
}
void setScaling(const b3Vector3& scaling)
{
m_scaling = scaling;
}
virtual int calculateSerializeBufferSize() const;
///fills the dataBuffer and returns the struct name (and 0 on failure)
//virtual const char* serialize(void* dataBuffer, b3Serializer* serializer) const;
};
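//As the comments on getLockedReadOnlyVertexIndexBase describe, a caller locks a subpart, walks
//the vertex/index memory using the reported strides, and unlocks it again. The function below
//is only a minimal sketch of that pattern, assuming float vertices and 32-bit triangle indices
//(other PHY_ScalarType combinations would need their own branches); walkSubpart is an
//illustrative name, not part of this interface.
inline void walkSubpart(const b3StridingMeshInterface& mesh, int subpart)
{
	const unsigned char* vertexbase = 0;
	const unsigned char* indexbase = 0;
	int numverts = 0, stride = 0, indexstride = 0, numfaces = 0;
	PHY_ScalarType vertexType, indexType;
	mesh.getLockedReadOnlyVertexIndexBase(&vertexbase, numverts, vertexType, stride,
		&indexbase, indexstride, numfaces, indexType, subpart);
	if (vertexType == PHY_FLOAT && indexType == PHY_INTEGER)
	{
		for (int t = 0; t < numfaces; t++)
		{
			const unsigned int* tri = (const unsigned int*)(indexbase + t * indexstride);
			const float* v0 = (const float*)(vertexbase + tri[0] * stride);
			const float* v1 = (const float*)(vertexbase + tri[1] * stride);
			const float* v2 = (const float*)(vertexbase + tri[2] * stride);
			//process the triangle (v0, v1, v2) here
			(void)v0; (void)v1; (void)v2;
		}
	}
	mesh.unLockReadOnlyVertexBase(subpart);
}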
struct b3IntIndexData
{
int m_value;
};
struct b3ShortIntIndexData
{
short m_value;
char m_pad[2];
};
struct b3ShortIntIndexTripletData
{
short m_values[3];
char m_pad[2];
};
struct b3CharIndexTripletData
{
unsigned char m_values[3];
char m_pad;
};
///Do not change these serialization structures; changing them requires an updated sBulletDNAstr/sBulletDNAstr64
struct b3MeshPartData
{
b3Vector3FloatData *m_vertices3f;
b3Vector3DoubleData *m_vertices3d;
b3IntIndexData *m_indices32;
b3ShortIntIndexTripletData *m_3indices16;
b3CharIndexTripletData *m_3indices8;
b3ShortIntIndexData *m_indices16;//backwards compatibility
int m_numTriangles;//length of m_indices = m_numTriangles
int m_numVertices;
};
///Do not change these serialization structures; changing them requires an updated sBulletDNAstr/sBulletDNAstr64
struct b3StridingMeshInterfaceData
{
b3MeshPartData *m_meshPartsPtr;
b3Vector3FloatData m_scaling;
int m_numMeshParts;
char m_padding[4];
};
B3_FORCE_INLINE int b3StridingMeshInterface::calculateSerializeBufferSize() const
{
return sizeof(b3StridingMeshInterfaceData);
}
#endif //B3_STRIDING_MESHINTERFACE_H

View File

@@ -0,0 +1,28 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "b3TriangleCallback.h"
b3TriangleCallback::~b3TriangleCallback()
{
}
b3InternalTriangleIndexCallback::~b3InternalTriangleIndexCallback()
{
}

View File

@@ -0,0 +1,42 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef B3_TRIANGLE_CALLBACK_H
#define B3_TRIANGLE_CALLBACK_H
#include "Bullet3Common/b3Vector3.h"
///The b3TriangleCallback provides a callback for each overlapping triangle when calling processAllTriangles.
///This callback is called by processAllTriangles for all b3ConcaveShape derived classes, such as b3BvhTriangleMeshShape, b3StaticPlaneShape and b3HeightfieldTerrainShape.
class b3TriangleCallback
{
public:
virtual ~b3TriangleCallback();
virtual void processTriangle(b3Vector3* triangle, int partId, int triangleIndex) = 0;
};
class b3InternalTriangleIndexCallback
{
public:
virtual ~b3InternalTriangleIndexCallback();
virtual void internalProcessTriangleIndex(b3Vector3* triangle,int partId,int triangleIndex) = 0;
};
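//An implementation of the public callback is analogous: derive from b3TriangleCallback and
//override processTriangle. The class below is a hypothetical example (not part of the library)
//that simply ignores its input; a real callback would draw, collect or test the triangle.
class ExampleTriangleCallback : public b3TriangleCallback
{
public:
	virtual void processTriangle(b3Vector3* triangle, int partId, int triangleIndex)
	{
		(void)triangle;
		(void)partId;
		(void)triangleIndex;
	}
};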
#endif //B3_TRIANGLE_CALLBACK_H

View File

@@ -0,0 +1,95 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "b3TriangleIndexVertexArray.h"
b3TriangleIndexVertexArray::b3TriangleIndexVertexArray(int numTriangles,int* triangleIndexBase,int triangleIndexStride,int numVertices,b3Scalar* vertexBase,int vertexStride)
: m_hasAabb(0)
{
b3IndexedMesh mesh;
mesh.m_numTriangles = numTriangles;
mesh.m_triangleIndexBase = (const unsigned char *)triangleIndexBase;
mesh.m_triangleIndexStride = triangleIndexStride;
mesh.m_numVertices = numVertices;
mesh.m_vertexBase = (const unsigned char *)vertexBase;
mesh.m_vertexStride = vertexStride;
addIndexedMesh(mesh);
}
b3TriangleIndexVertexArray::~b3TriangleIndexVertexArray()
{
}
void b3TriangleIndexVertexArray::getLockedVertexIndexBase(unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart)
{
b3Assert(subpart< getNumSubParts() );
b3IndexedMesh& mesh = m_indexedMeshes[subpart];
numverts = mesh.m_numVertices;
(*vertexbase) = (unsigned char *) mesh.m_vertexBase;
type = mesh.m_vertexType;
vertexStride = mesh.m_vertexStride;
numfaces = mesh.m_numTriangles;
(*indexbase) = (unsigned char *)mesh.m_triangleIndexBase;
indexstride = mesh.m_triangleIndexStride;
indicestype = mesh.m_indexType;
}
void b3TriangleIndexVertexArray::getLockedReadOnlyVertexIndexBase(const unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,const unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart) const
{
b3Assert(subpart< getNumSubParts() );
const b3IndexedMesh& mesh = m_indexedMeshes[subpart];
numverts = mesh.m_numVertices;
(*vertexbase) = (const unsigned char *)mesh.m_vertexBase;
type = mesh.m_vertexType;
vertexStride = mesh.m_vertexStride;
numfaces = mesh.m_numTriangles;
(*indexbase) = (const unsigned char *)mesh.m_triangleIndexBase;
indexstride = mesh.m_triangleIndexStride;
indicestype = mesh.m_indexType;
}
bool b3TriangleIndexVertexArray::hasPremadeAabb() const
{
return (m_hasAabb == 1);
}
void b3TriangleIndexVertexArray::setPremadeAabb(const b3Vector3& aabbMin, const b3Vector3& aabbMax ) const
{
m_aabbMin = aabbMin;
m_aabbMax = aabbMax;
m_hasAabb = 1; // this is intentionally an int see notes in header
}
void b3TriangleIndexVertexArray::getPremadeAabb(b3Vector3* aabbMin, b3Vector3* aabbMax ) const
{
*aabbMin = m_aabbMin;
*aabbMax = m_aabbMax;
}

View File

@@ -0,0 +1,133 @@
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2009 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef B3_TRIANGLE_INDEX_VERTEX_ARRAY_H
#define B3_TRIANGLE_INDEX_VERTEX_ARRAY_H
#include "b3StridingMeshInterface.h"
#include "Bullet3Common/b3AlignedObjectArray.h"
#include "Bullet3Common/b3Scalar.h"
///The b3IndexedMesh indexes a single vertex and index array. Multiple b3IndexedMesh objects can be passed into a b3TriangleIndexVertexArray using addIndexedMesh.
///Instead of the number of indices, we pass the number of triangles.
B3_ATTRIBUTE_ALIGNED16( struct) b3IndexedMesh
{
B3_DECLARE_ALIGNED_ALLOCATOR();
int m_numTriangles;
const unsigned char * m_triangleIndexBase;
// Size in bytes of the indices for one triangle (3*sizeof(index_type) if the indices are tightly packed)
int m_triangleIndexStride;
int m_numVertices;
const unsigned char * m_vertexBase;
// Size of a vertex, in bytes
int m_vertexStride;
// The index type is set when adding an indexed mesh to the
// b3TriangleIndexVertexArray, do not set it manually
PHY_ScalarType m_indexType;
// The vertex type defaults to Bullet's precision mode (float or double),
// but can be set manually if you, for example, run Bullet with double precision but have
// mesh data in single precision.
PHY_ScalarType m_vertexType;
b3IndexedMesh()
:m_indexType(PHY_INTEGER),
#ifdef B3_USE_DOUBLE_PRECISION
m_vertexType(PHY_DOUBLE)
#else // B3_USE_DOUBLE_PRECISION
m_vertexType(PHY_FLOAT)
#endif // B3_USE_DOUBLE_PRECISION
{
}
}
;
typedef b3AlignedObjectArray<b3IndexedMesh> IndexedMeshArray;
///The b3TriangleIndexVertexArray allows access to multiple triangle meshes by indexing into existing triangle/index arrays.
///Additional meshes can be added using addIndexedMesh.
///No duplicate is made of the vertex/index data; it only indexes into external vertex/index arrays.
///So keep those arrays around during the lifetime of this b3TriangleIndexVertexArray.
B3_ATTRIBUTE_ALIGNED16( class) b3TriangleIndexVertexArray : public b3StridingMeshInterface
{
protected:
IndexedMeshArray m_indexedMeshes;
int m_pad[2];
mutable int m_hasAabb; // using int instead of bool to maintain alignment
mutable b3Vector3 m_aabbMin;
mutable b3Vector3 m_aabbMax;
public:
B3_DECLARE_ALIGNED_ALLOCATOR();
b3TriangleIndexVertexArray() : m_hasAabb(0)
{
}
virtual ~b3TriangleIndexVertexArray();
//just to be backwards compatible
b3TriangleIndexVertexArray(int numTriangles,int* triangleIndexBase,int triangleIndexStride,int numVertices,b3Scalar* vertexBase,int vertexStride);
void addIndexedMesh(const b3IndexedMesh& mesh, PHY_ScalarType indexType = PHY_INTEGER)
{
m_indexedMeshes.push_back(mesh);
m_indexedMeshes[m_indexedMeshes.size()-1].m_indexType = indexType;
}
virtual void getLockedVertexIndexBase(unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0);
virtual void getLockedReadOnlyVertexIndexBase(const unsigned char **vertexbase, int& numverts,PHY_ScalarType& type, int& vertexStride,const unsigned char **indexbase,int & indexstride,int& numfaces,PHY_ScalarType& indicestype,int subpart=0) const;
/// unLockVertexBase finishes the access to a subpart of the triangle mesh
/// make a call to unLockVertexBase when the read and write access (using getLockedVertexIndexBase) is finished
virtual void unLockVertexBase(int subpart) {(void)subpart;}
virtual void unLockReadOnlyVertexBase(int subpart) const {(void)subpart;}
/// getNumSubParts returns the number of separate subparts
/// each subpart has a continuous array of vertices and indices
virtual int getNumSubParts() const {
return (int)m_indexedMeshes.size();
}
IndexedMeshArray& getIndexedMeshArray()
{
return m_indexedMeshes;
}
const IndexedMeshArray& getIndexedMeshArray() const
{
return m_indexedMeshes;
}
virtual void preallocateVertices(int numverts){(void) numverts;}
virtual void preallocateIndices(int numindices){(void) numindices;}
virtual bool hasPremadeAabb() const;
virtual void setPremadeAabb(const b3Vector3& aabbMin, const b3Vector3& aabbMax ) const;
virtual void getPremadeAabb(b3Vector3* aabbMin, b3Vector3* aabbMax ) const;
}
;
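//A minimal usage sketch (illustrative only): the caller owns the vertex and index buffers and,
//as noted above, must keep them alive for the lifetime of the b3TriangleIndexVertexArray.
//The 16-bit index type is passed explicitly to addIndexedMesh.
inline b3TriangleIndexVertexArray* createExampleMeshInterface(const float* vertices, int numVertices,
	const unsigned short* indices, int numTriangles)
{
	b3IndexedMesh mesh;
	mesh.m_numTriangles = numTriangles;
	mesh.m_triangleIndexBase = (const unsigned char*)indices;
	mesh.m_triangleIndexStride = 3 * sizeof(unsigned short);
	mesh.m_numVertices = numVertices;
	mesh.m_vertexBase = (const unsigned char*)vertices;
	mesh.m_vertexStride = 3 * sizeof(float);
	mesh.m_vertexType = PHY_FLOAT;
	b3TriangleIndexVertexArray* meshInterface = new b3TriangleIndexVertexArray();
	meshInterface->addIndexedMesh(mesh, PHY_SHORT);
	return meshInterface;
}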
#endif //B3_TRIANGLE_INDEX_VERTEX_ARRAY_H

View File

@@ -0,0 +1,310 @@
//keep this enum in sync with the CPU version (in b3Collidable.h)
//written by Erwin Coumans
#define SHAPE_CONVEX_HULL 3
#define SHAPE_CONCAVE_TRIMESH 5
#define TRIANGLE_NUM_CONVEX_FACES 5
#define SHAPE_COMPOUND_OF_CONVEX_HULLS 6
#define SHAPE_SPHERE 7
typedef unsigned int u32;
#define MAX_NUM_PARTS_IN_BITS 10
///btQuantizedBvhNode is a compressed aabb node, 16 bytes.
///Node can be used for leafnode or internal node. Leafnodes can point to 32-bit triangle index (non-negative range).
typedef struct
{
//12 bytes
unsigned short int m_quantizedAabbMin[3];
unsigned short int m_quantizedAabbMax[3];
//4 bytes
int m_escapeIndexOrTriangleIndex;
} btQuantizedBvhNode;
typedef struct
{
float4 m_aabbMin;
float4 m_aabbMax;
float4 m_quantization;
int m_numNodes;
int m_numSubTrees;
int m_nodeOffset;
int m_subTreeOffset;
} b3BvhInfo;
/*
bool isLeafNode() const
{
//skipindex is negative (internal node), triangleindex >=0 (leafnode)
return (m_escapeIndexOrTriangleIndex >= 0);
}
int getEscapeIndex() const
{
btAssert(!isLeafNode());
return -m_escapeIndexOrTriangleIndex;
}
int getTriangleIndex() const
{
btAssert(isLeafNode());
unsigned int x=0;
unsigned int y = (~(x&0))<<(31-MAX_NUM_PARTS_IN_BITS);
// Get only the lower bits where the triangle index is stored
return (m_escapeIndexOrTriangleIndex&~(y));
}
int getPartId() const
{
btAssert(isLeafNode());
// Get only the highest bits where the part index is stored
return (m_escapeIndexOrTriangleIndex>>(31-MAX_NUM_PARTS_IN_BITS));
}
*/
int getTriangleIndex(const btQuantizedBvhNode* rootNode)
{
unsigned int x=0;
unsigned int y = (~(x&0))<<(31-MAX_NUM_PARTS_IN_BITS);
// Get only the lower bits where the triangle index is stored
return (rootNode->m_escapeIndexOrTriangleIndex&~(y));
}
int isLeaf(const btQuantizedBvhNode* rootNode)
{
//skipindex is negative (internal node), triangleindex >=0 (leafnode)
return (rootNode->m_escapeIndexOrTriangleIndex >= 0)? 1 : 0;
}
int getEscapeIndex(const btQuantizedBvhNode* rootNode)
{
return -rootNode->m_escapeIndexOrTriangleIndex;
}
typedef struct
{
//12 bytes
unsigned short int m_quantizedAabbMin[3];
unsigned short int m_quantizedAabbMax[3];
//4 bytes, points to the root of the subtree
int m_rootNodeIndex;
//4 bytes
int m_subtreeSize;
int m_padding[3];
} btBvhSubtreeInfo;
///keep this in sync with b3Collidable.h
typedef struct
{
int m_numChildShapes;
int blaat2;
int m_shapeType;
int m_shapeIndex;
} btCollidableGpu;
typedef struct
{
float4 m_childPosition;
float4 m_childOrientation;
int m_shapeIndex;
int m_unused0;
int m_unused1;
int m_unused2;
} btGpuChildShape;
typedef struct
{
float4 m_pos;
float4 m_quat;
float4 m_linVel;
float4 m_angVel;
u32 m_collidableIdx;
float m_invMass;
float m_restituitionCoeff;
float m_frictionCoeff;
} BodyData;
typedef struct
{
union
{
float4 m_min;
float m_minElems[4];
int m_minIndices[4];
};
union
{
float4 m_max;
float m_maxElems[4];
int m_maxIndices[4];
};
} btAabbCL;
int testQuantizedAabbAgainstQuantizedAabb(
const unsigned short int* aabbMin1,
const unsigned short int* aabbMax1,
const unsigned short int* aabbMin2,
const unsigned short int* aabbMax2)
{
//int overlap = 1;
if (aabbMin1[0] > aabbMax2[0])
return 0;
if (aabbMax1[0] < aabbMin2[0])
return 0;
if (aabbMin1[1] > aabbMax2[1])
return 0;
if (aabbMax1[1] < aabbMin2[1])
return 0;
if (aabbMin1[2] > aabbMax2[2])
return 0;
if (aabbMax1[2] < aabbMin2[2])
return 0;
return 1;
//overlap = ((aabbMin1[0] > aabbMax2[0]) || (aabbMax1[0] < aabbMin2[0])) ? 0 : overlap;
//overlap = ((aabbMin1[2] > aabbMax2[2]) || (aabbMax1[2] < aabbMin2[2])) ? 0 : overlap;
//overlap = ((aabbMin1[1] > aabbMax2[1]) || (aabbMax1[1] < aabbMin2[1])) ? 0 : overlap;
//return overlap;
}
void quantizeWithClamp(unsigned short* out, float4 point2,int isMax, float4 bvhAabbMin, float4 bvhAabbMax, float4 bvhQuantization)
{
float4 clampedPoint = max(point2,bvhAabbMin);
clampedPoint = min (clampedPoint, bvhAabbMax);
float4 v = (clampedPoint - bvhAabbMin) * bvhQuantization;
if (isMax)
{
out[0] = (unsigned short) (((unsigned short)(v.x+1.f) | 1));
out[1] = (unsigned short) (((unsigned short)(v.y+1.f) | 1));
out[2] = (unsigned short) (((unsigned short)(v.z+1.f) | 1));
} else
{
out[0] = (unsigned short) (((unsigned short)(v.x) & 0xfffe));
out[1] = (unsigned short) (((unsigned short)(v.y) & 0xfffe));
out[2] = (unsigned short) (((unsigned short)(v.z) & 0xfffe));
}
}
// work-in-progress
__kernel void bvhTraversalKernel( __global const int2* pairs,
__global const BodyData* rigidBodies,
__global const btCollidableGpu* collidables,
__global btAabbCL* aabbs,
__global int4* concavePairsOut,
__global volatile int* numConcavePairsOut,
__global const btBvhSubtreeInfo* subtreeHeadersRoot,
__global const btQuantizedBvhNode* quantizedNodesRoot,
__global const b3BvhInfo* bvhInfos,
int numPairs,
int maxNumConcavePairsCapacity)
{
int id = get_global_id(0);
if (id>=numPairs)
return;
int bodyIndexA = pairs[id].x;
int bodyIndexB = pairs[id].y;
int collidableIndexA = rigidBodies[bodyIndexA].m_collidableIdx;
int collidableIndexB = rigidBodies[bodyIndexB].m_collidableIdx;
//once the broadphase avoids static-static pairs, we can remove this test
if ((rigidBodies[bodyIndexA].m_invMass==0) &&(rigidBodies[bodyIndexB].m_invMass==0))
{
return;
}
if (collidables[collidableIndexA].m_shapeType!=SHAPE_CONCAVE_TRIMESH)
return;
int shapeTypeB = collidables[collidableIndexB].m_shapeType;
if (shapeTypeB!=SHAPE_CONVEX_HULL &&
shapeTypeB!=SHAPE_SPHERE &&
shapeTypeB!=SHAPE_COMPOUND_OF_CONVEX_HULLS
)
return;
b3BvhInfo bvhInfo = bvhInfos[collidables[collidableIndexA].m_numChildShapes];
float4 bvhAabbMin = bvhInfo.m_aabbMin;
float4 bvhAabbMax = bvhInfo.m_aabbMax;
float4 bvhQuantization = bvhInfo.m_quantization;
int numSubtreeHeaders = bvhInfo.m_numSubTrees;
__global const btBvhSubtreeInfo* subtreeHeaders = &subtreeHeadersRoot[bvhInfo.m_subTreeOffset];
__global const btQuantizedBvhNode* quantizedNodes = &quantizedNodesRoot[bvhInfo.m_nodeOffset];
unsigned short int quantizedQueryAabbMin[3];
unsigned short int quantizedQueryAabbMax[3];
quantizeWithClamp(quantizedQueryAabbMin,aabbs[bodyIndexB].m_min,false,bvhAabbMin, bvhAabbMax,bvhQuantization);
quantizeWithClamp(quantizedQueryAabbMax,aabbs[bodyIndexB].m_max,true ,bvhAabbMin, bvhAabbMax,bvhQuantization);
for (int i=0;i<numSubtreeHeaders;i++)
{
btBvhSubtreeInfo subtree = subtreeHeaders[i];
int overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
if (overlap != 0)
{
int startNodeIndex = subtree.m_rootNodeIndex;
int endNodeIndex = subtree.m_rootNodeIndex+subtree.m_subtreeSize;
int curIndex = startNodeIndex;
int escapeIndex;
int isLeafNode;
int aabbOverlap;
while (curIndex < endNodeIndex)
{
btQuantizedBvhNode rootNode = quantizedNodes[curIndex];
aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode.m_quantizedAabbMin,rootNode.m_quantizedAabbMax);
isLeafNode = isLeaf(&rootNode);
if (aabbOverlap)
{
if (isLeafNode)
{
int triangleIndex = getTriangleIndex(&rootNode);
if (shapeTypeB==SHAPE_COMPOUND_OF_CONVEX_HULLS)
{
int numChildrenB = collidables[collidableIndexB].m_numChildShapes;
int pairIdx = atomic_add(numConcavePairsOut,numChildrenB);
for (int b=0;b<numChildrenB;b++)
{
if ((pairIdx+b)<maxNumConcavePairsCapacity)
{
int childShapeIndexB = collidables[collidableIndexB].m_shapeIndex+b;
int4 newPair = (int4)(bodyIndexA,bodyIndexB,triangleIndex,childShapeIndexB);
concavePairsOut[pairIdx+b] = newPair;
}
}
} else
{
int pairIdx = atomic_inc(numConcavePairsOut);
if (pairIdx<maxNumConcavePairsCapacity)
{
int4 newPair = (int4)(bodyIndexA,bodyIndexB,triangleIndex,0);
concavePairsOut[pairIdx] = newPair;
}
}
}
curIndex++;
} else
{
if (isLeafNode)
{
curIndex++;
} else
{
escapeIndex = getEscapeIndex(&rootNode);
curIndex += escapeIndex;
}
}
}
}
}
}
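//Host-side illustration (C++) of the quantization convention the kernel relies on: each axis of
//b3BvhInfo::m_quantization is assumed to be roughly 65533.f / (aabbMax - aabbMin), mirroring
//Bullet's quantized BVH, so that coordinates map into the unsigned-short range before the
//conservative overlap test. The helper below is a sketch, not part of the kernel or library.
static void quantizeClampedHost(unsigned short out[3], const float p[3], bool isMax,
	const float aabbMin[3], const float aabbMax[3], const float quantization[3])
{
	for (int i = 0; i < 3; i++)
	{
		float clamped = p[i] < aabbMin[i] ? aabbMin[i] : (p[i] > aabbMax[i] ? aabbMax[i] : p[i]);
		float v = (clamped - aabbMin[i]) * quantization[i];
		//maxima are rounded up and made odd, minima truncated and made even, so the quantized
		//AABB overlap test above never misses a true overlap
		out[i] = isMax ? (unsigned short)(((unsigned short)(v + 1.f)) | 1)
		               : (unsigned short)(((unsigned short)v) & 0xfffe);
	}
}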

View File

@@ -0,0 +1,313 @@
//this file is autogenerated using stringify.bat (premake --stringify) in the build folder of this project
static const char* bvhTraversalKernelCL= \
"//keep this enum in sync with the CPU version (in b3Collidable.h)\n"
"//written by Erwin Coumans\n"
"\n"
"#define SHAPE_CONVEX_HULL 3\n"
"#define SHAPE_CONCAVE_TRIMESH 5\n"
"#define TRIANGLE_NUM_CONVEX_FACES 5\n"
"#define SHAPE_COMPOUND_OF_CONVEX_HULLS 6\n"
"#define SHAPE_SPHERE 7\n"
"\n"
"typedef unsigned int u32;\n"
"\n"
"#define MAX_NUM_PARTS_IN_BITS 10\n"
"\n"
"///b3QuantizedBvhNode is a compressed aabb node, 16 bytes.\n"
"///Node can be used for leafnode or internal node. Leafnodes can point to 32-bit triangle index (non-negative range).\n"
"typedef struct\n"
"{\n"
" //12 bytes\n"
" unsigned short int m_quantizedAabbMin[3];\n"
" unsigned short int m_quantizedAabbMax[3];\n"
" //4 bytes\n"
" int m_escapeIndexOrTriangleIndex;\n"
"} b3QuantizedBvhNode;\n"
"\n"
"typedef struct\n"
"{\n"
" float4 m_aabbMin;\n"
" float4 m_aabbMax;\n"
" float4 m_quantization;\n"
" int m_numNodes;\n"
" int m_numSubTrees;\n"
" int m_nodeOffset;\n"
" int m_subTreeOffset;\n"
"\n"
"} b3BvhInfo;\n"
"\n"
"/*\n"
" bool isLeafNode() const\n"
" {\n"
" //skipindex is negative (internal node), triangleindex >=0 (leafnode)\n"
" return (m_escapeIndexOrTriangleIndex >= 0);\n"
" }\n"
" int getEscapeIndex() const\n"
" {\n"
" b3Assert(!isLeafNode());\n"
" return -m_escapeIndexOrTriangleIndex;\n"
" }\n"
" int getTriangleIndex() const\n"
" {\n"
" b3Assert(isLeafNode());\n"
" unsigned int x=0;\n"
" unsigned int y = (~(x&0))<<(31-MAX_NUM_PARTS_IN_BITS);\n"
" // Get only the lower bits where the triangle index is stored\n"
" return (m_escapeIndexOrTriangleIndex&~(y));\n"
" }\n"
" int getPartId() const\n"
" {\n"
" b3Assert(isLeafNode());\n"
" // Get only the highest bits where the part index is stored\n"
" return (m_escapeIndexOrTriangleIndex>>(31-MAX_NUM_PARTS_IN_BITS));\n"
" }\n"
"*/\n"
"\n"
"int getTriangleIndex(const b3QuantizedBvhNode* rootNode)\n"
"{\n"
" unsigned int x=0;\n"
" unsigned int y = (~(x&0))<<(31-MAX_NUM_PARTS_IN_BITS);\n"
" // Get only the lower bits where the triangle index is stored\n"
" return (rootNode->m_escapeIndexOrTriangleIndex&~(y));\n"
"}\n"
"\n"
"int isLeaf(const b3QuantizedBvhNode* rootNode)\n"
"{\n"
" //skipindex is negative (internal node), triangleindex >=0 (leafnode)\n"
" return (rootNode->m_escapeIndexOrTriangleIndex >= 0)? 1 : 0;\n"
"}\n"
" \n"
"int getEscapeIndex(const b3QuantizedBvhNode* rootNode)\n"
"{\n"
" return -rootNode->m_escapeIndexOrTriangleIndex;\n"
"}\n"
"\n"
"typedef struct\n"
"{\n"
" //12 bytes\n"
" unsigned short int m_quantizedAabbMin[3];\n"
" unsigned short int m_quantizedAabbMax[3];\n"
" //4 bytes, points to the root of the subtree\n"
" int m_rootNodeIndex;\n"
" //4 bytes\n"
" int m_subtreeSize;\n"
" int m_padding[3];\n"
"} b3BvhSubtreeInfo;\n"
"\n"
"///keep this in sync with b3Collidable.h\n"
"typedef struct\n"
"{\n"
" int m_numChildShapes;\n"
" int blaat2;\n"
" int m_shapeType;\n"
" int m_shapeIndex;\n"
" \n"
"} b3CollidableGpu;\n"
"\n"
"typedef struct\n"
"{\n"
" float4 m_childPosition;\n"
" float4 m_childOrientation;\n"
" int m_shapeIndex;\n"
" int m_unused0;\n"
" int m_unused1;\n"
" int m_unused2;\n"
"} b3GpuChildShape;\n"
"\n"
"\n"
"typedef struct\n"
"{\n"
" float4 m_pos;\n"
" float4 m_quat;\n"
" float4 m_linVel;\n"
" float4 m_angVel;\n"
"\n"
" u32 m_collidableIdx;\n"
" float m_invMass;\n"
" float m_restituitionCoeff;\n"
" float m_frictionCoeff;\n"
"} BodyData;\n"
"\n"
"typedef struct \n"
"{\n"
" union\n"
" {\n"
" float4 m_min;\n"
" float m_minElems[4];\n"
" int m_minIndices[4];\n"
" };\n"
" union\n"
" {\n"
" float4 m_max;\n"
" float m_maxElems[4];\n"
" int m_maxIndices[4];\n"
" };\n"
"} b3AabbCL;\n"
"\n"
"\n"
"int testQuantizedAabbAgainstQuantizedAabb(\n"
" const unsigned short int* aabbMin1,\n"
" const unsigned short int* aabbMax1,\n"
" const unsigned short int* aabbMin2,\n"
" const unsigned short int* aabbMax2)\n"
"{\n"
" //int overlap = 1;\n"
" if (aabbMin1[0] > aabbMax2[0])\n"
" return 0;\n"
" if (aabbMax1[0] < aabbMin2[0])\n"
" return 0;\n"
" if (aabbMin1[1] > aabbMax2[1])\n"
" return 0;\n"
" if (aabbMax1[1] < aabbMin2[1])\n"
" return 0;\n"
" if (aabbMin1[2] > aabbMax2[2])\n"
" return 0;\n"
" if (aabbMax1[2] < aabbMin2[2])\n"
" return 0;\n"
" return 1;\n"
" //overlap = ((aabbMin1[0] > aabbMax2[0]) || (aabbMax1[0] < aabbMin2[0])) ? 0 : overlap;\n"
" //overlap = ((aabbMin1[2] > aabbMax2[2]) || (aabbMax1[2] < aabbMin2[2])) ? 0 : overlap;\n"
" //overlap = ((aabbMin1[1] > aabbMax2[1]) || (aabbMax1[1] < aabbMin2[1])) ? 0 : overlap;\n"
" //return overlap;\n"
"}\n"
"\n"
"\n"
"void quantizeWithClamp(unsigned short* out, float4 point2,int isMax, float4 bvhAabbMin, float4 bvhAabbMax, float4 bvhQuantization)\n"
"{\n"
" float4 clampedPoint = max(point2,bvhAabbMin);\n"
" clampedPoint = min (clampedPoint, bvhAabbMax);\n"
"\n"
" float4 v = (clampedPoint - bvhAabbMin) * bvhQuantization;\n"
" if (isMax)\n"
" {\n"
" out[0] = (unsigned short) (((unsigned short)(v.x+1.f) | 1));\n"
" out[1] = (unsigned short) (((unsigned short)(v.y+1.f) | 1));\n"
" out[2] = (unsigned short) (((unsigned short)(v.z+1.f) | 1));\n"
" } else\n"
" {\n"
" out[0] = (unsigned short) (((unsigned short)(v.x) & 0xfffe));\n"
" out[1] = (unsigned short) (((unsigned short)(v.y) & 0xfffe));\n"
" out[2] = (unsigned short) (((unsigned short)(v.z) & 0xfffe));\n"
" }\n"
"\n"
"}\n"
"\n"
"\n"
"// work-in-progress\n"
"__kernel void bvhTraversalKernel( __global const int2* pairs, \n"
" __global const BodyData* rigidBodies, \n"
" __global const b3CollidableGpu* collidables,\n"
" __global b3AabbCL* aabbs,\n"
" __global int4* concavePairsOut,\n"
" __global volatile int* numConcavePairsOut,\n"
" __global const b3BvhSubtreeInfo* subtreeHeadersRoot,\n"
" __global const b3QuantizedBvhNode* quantizedNodesRoot,\n"
" __global const b3BvhInfo* bvhInfos,\n"
" int numPairs,\n"
" int maxNumConcavePairsCapacity)\n"
"{\n"
" int id = get_global_id(0);\n"
" if (id>=numPairs)\n"
" return;\n"
" \n"
" int bodyIndexA = pairs[id].x;\n"
" int bodyIndexB = pairs[id].y;\n"
" int collidableIndexA = rigidBodies[bodyIndexA].m_collidableIdx;\n"
" int collidableIndexB = rigidBodies[bodyIndexB].m_collidableIdx;\n"
" \n"
" //once the broadphase avoids static-static pairs, we can remove this test\n"
" if ((rigidBodies[bodyIndexA].m_invMass==0) &&(rigidBodies[bodyIndexB].m_invMass==0))\n"
" {\n"
" return;\n"
" }\n"
" \n"
" if (collidables[collidableIndexA].m_shapeType!=SHAPE_CONCAVE_TRIMESH)\n"
" return;\n"
"\n"
" int shapeTypeB = collidables[collidableIndexB].m_shapeType;\n"
" \n"
" if (shapeTypeB!=SHAPE_CONVEX_HULL &&\n"
" shapeTypeB!=SHAPE_SPHERE &&\n"
" shapeTypeB!=SHAPE_COMPOUND_OF_CONVEX_HULLS\n"
" )\n"
" return;\n"
"\n"
" b3BvhInfo bvhInfo = bvhInfos[collidables[collidableIndexA].m_numChildShapes];\n"
"\n"
" float4 bvhAabbMin = bvhInfo.m_aabbMin;\n"
" float4 bvhAabbMax = bvhInfo.m_aabbMax;\n"
" float4 bvhQuantization = bvhInfo.m_quantization;\n"
" int numSubtreeHeaders = bvhInfo.m_numSubTrees;\n"
" __global const b3BvhSubtreeInfo* subtreeHeaders = &subtreeHeadersRoot[bvhInfo.m_subTreeOffset];\n"
" __global const b3QuantizedBvhNode* quantizedNodes = &quantizedNodesRoot[bvhInfo.m_nodeOffset];\n"
" \n"
"\n"
" unsigned short int quantizedQueryAabbMin[3];\n"
" unsigned short int quantizedQueryAabbMax[3];\n"
" quantizeWithClamp(quantizedQueryAabbMin,aabbs[bodyIndexB].m_min,false,bvhAabbMin, bvhAabbMax,bvhQuantization);\n"
" quantizeWithClamp(quantizedQueryAabbMax,aabbs[bodyIndexB].m_max,true ,bvhAabbMin, bvhAabbMax,bvhQuantization);\n"
" \n"
" for (int i=0;i<numSubtreeHeaders;i++)\n"
" {\n"
" b3BvhSubtreeInfo subtree = subtreeHeaders[i];\n"
" \n"
" int overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);\n"
" if (overlap != 0)\n"
" {\n"
" int startNodeIndex = subtree.m_rootNodeIndex;\n"
" int endNodeIndex = subtree.m_rootNodeIndex+subtree.m_subtreeSize;\n"
" int curIndex = startNodeIndex;\n"
" int escapeIndex;\n"
" int isLeafNode;\n"
" int aabbOverlap;\n"
" while (curIndex < endNodeIndex)\n"
" {\n"
" b3QuantizedBvhNode rootNode = quantizedNodes[curIndex];\n"
" aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode.m_quantizedAabbMin,rootNode.m_quantizedAabbMax);\n"
" isLeafNode = isLeaf(&rootNode);\n"
" if (aabbOverlap)\n"
" {\n"
" if (isLeafNode)\n"
" {\n"
" int triangleIndex = getTriangleIndex(&rootNode);\n"
" if (shapeTypeB==SHAPE_COMPOUND_OF_CONVEX_HULLS)\n"
" {\n"
" int numChildrenB = collidables[collidableIndexB].m_numChildShapes;\n"
" int pairIdx = atomic_add(numConcavePairsOut,numChildrenB);\n"
" for (int b=0;b<numChildrenB;b++)\n"
" {\n"
" if ((pairIdx+b)<maxNumConcavePairsCapacity)\n"
" {\n"
" int childShapeIndexB = collidables[collidableIndexB].m_shapeIndex+b;\n"
" int4 newPair = (int4)(bodyIndexA,bodyIndexB,triangleIndex,childShapeIndexB);\n"
" concavePairsOut[pairIdx+b] = newPair;\n"
" }\n"
" }\n"
" } else\n"
" {\n"
" int pairIdx = atomic_inc(numConcavePairsOut);\n"
" if (pairIdx<maxNumConcavePairsCapacity)\n"
" {\n"
" int4 newPair = (int4)(bodyIndexA,bodyIndexB,triangleIndex,0);\n"
" concavePairsOut[pairIdx] = newPair;\n"
" }\n"
" }\n"
" } \n"
" curIndex++;\n"
" } else\n"
" {\n"
" if (isLeafNode)\n"
" {\n"
" curIndex++;\n"
" } else\n"
" {\n"
" escapeIndex = getEscapeIndex(&rootNode);\n"
" curIndex += escapeIndex;\n"
" }\n"
" }\n"
" }\n"
" }\n"
" }\n"
"\n"
"}\n"
;
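//A bare-bones sketch of how a stringified kernel such as bvhTraversalKernelCL is typically
//turned into a cl_kernel with the standard OpenCL host API (error handling omitted; the
//Bullet3 OpenCL code uses its own helper utilities rather than these raw calls, so treat
//this only as an illustration of the general mechanism):
#include <CL/cl.h>
static cl_kernel buildBvhTraversalKernel(cl_context ctx, cl_device_id device)
{
	const char* src = bvhTraversalKernelCL;
	cl_int err = 0;
	cl_program prog = clCreateProgramWithSource(ctx, 1, &src, 0, &err);
	clBuildProgram(prog, 1, &device, "", 0, 0);
	return clCreateKernel(prog, "bvhTraversalKernel", &err);
}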

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff