Added a 'cache friendly' tree traversal format and traversal: an array of subtrees, each with a specified maximum size. This is useful for fitting tree traversals on the SPU.
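To make the "fit on SPU" motivation concrete, here is a tiny back-of-the-envelope sketch (not part of the commit). The 16-byte node size comes from the struct below (12 bytes of quantized AABB plus a 4-byte index, aligned to 16); the 128-node cap is an illustrative assumption, not a value taken from this change.

```cpp
#include <cstdio>

int main()
{
	// sizeof(btQuantizedBvhNode) per the diff: 3+3 unsigned shorts + 4-byte index, 16-byte aligned
	const int nodeSizeBytes = 16;
	// hypothetical per-subtree node cap chosen at build time
	const int maxSubtreeNodes = 128;

	// a capped subtree is one small contiguous block, easy to DMA into the
	// 256 KB local store of an SPU alongside the rest of the working set
	std::printf("worst-case subtree buffer: %d bytes\n", nodeSizeBytes * maxSubtreeNodes);
	return 0;
}
```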
@@ -31,6 +31,7 @@ class btStridingMeshInterface;

///Node can be used for leafnode or internal node. Leafnodes can point to 32-bit triangle index (non-negative range).
ATTRIBUTE_ALIGNED16 (struct btQuantizedBvhNode)
{

	//12 bytes
	unsigned short int m_quantizedAabbMin[3];
	unsigned short int m_quantizedAabbMax[3];
@@ -86,15 +87,17 @@ public:

#include "../../LinearMath/btAlignedObjectArray.h"


typedef btAlignedObjectArray<btOptimizedBvhNode> NodeArray;

typedef btAlignedObjectArray<btQuantizedBvhNode> QuantizedNodeArray;

///OptimizedBvh stores an AABB tree that can be quickly traversed on CPU (and SPU, GPU in the future)
class btOptimizedBvh
ATTRIBUTE_ALIGNED16(class btOptimizedBvh)
{
	NodeArray m_leafNodes;
	btOptimizedBvhNode* m_contiguousNodes;
	NodeArray m_contiguousNodes;

	QuantizedNodeArray m_quantizedLeafNodes;
@@ -109,7 +112,43 @@ class btOptimizedBvh

	btVector3 m_bvhAabbMax;
	btVector3 m_bvhQuantization;

	//two versions, one for quantized and normal nodes. This allows code-reuse while maintaining readability (no template/macro!)
	enum btTraversalMode
	{
		TRAVERSAL_STACKLESS = 0,
		TRAVERSAL_STACKLESS_CACHE_FRIENDLY,
		TRAVERSAL_RECURSIVE
	};

	btTraversalMode m_traversalMode;

	///btBvhSubtreeInfo provides info to gather a subtree of limited size
	class btBvhSubtreeInfo
	{
	public:
		//12 bytes
		unsigned short int m_quantizedAabbMin[3];
		unsigned short int m_quantizedAabbMax[3];
		//4 bytes, points to the root of the subtree
		int m_rootNodeIndex;
		//4 bytes
		int m_subtreeSize;

		void setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode)
		{
			m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0];
			m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1];
			m_quantizedAabbMin[2] = quantizedNode.m_quantizedAabbMin[2];
			m_quantizedAabbMax[0] = quantizedNode.m_quantizedAabbMax[0];
			m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1];
			m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2];
		}
	};

	btAlignedObjectArray<btBvhSubtreeInfo> m_SubtreeHeaders;


	///two versions, one for quantized and normal nodes. This allows code-reuse while maintaining readability (no template/macro!)
	///this might be refactored into a virtual, it is usually not calculated at run-time
	void setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin)
	{
		if (m_useQuantization)
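The hunk above adds the traversal modes and the per-subtree headers: each btBvhSubtreeInfo names a contiguous, size-bounded run of 16-byte quantized nodes, starting at m_rootNodeIndex and m_subtreeSize nodes long. The following self-contained sketch (simplified mirror structs, not Bullet code) shows how such a slice can be walked stacklessly with the usual escape/skip-index convention; the names QuantizedNodeSketch, SubtreeInfoSketch, overlaps and querySubtreesSketch are inventions for illustration, and the exact packing of the 4-byte index is an assumption rather than something shown in this diff.

```cpp
#include <cstdio>
#include <vector>

// Simplified mirror of the 16-byte btQuantizedBvhNode from the diff above.
struct QuantizedNodeSketch
{
	unsigned short quantizedAabbMin[3];
	unsigned short quantizedAabbMax[3];
	int escapeIndexOrTriangleIndex;   // >= 0: leaf, triangle index; < 0: -(nodes to skip)
};

// Simplified mirror of btBvhSubtreeInfo.
struct SubtreeInfoSketch
{
	unsigned short quantizedAabbMin[3];
	unsigned short quantizedAabbMax[3];
	int rootNodeIndex;                // first node of this subtree in the flat node array
	int subtreeSize;                  // number of nodes in this subtree (bounded at build time)
};

// Per-axis overlap test on quantized (unsigned short) AABBs.
static bool overlaps(const unsigned short* minA, const unsigned short* maxA,
                     const unsigned short* minB, const unsigned short* maxB)
{
	for (int axis = 0; axis < 3; ++axis)
		if (minA[axis] > maxB[axis] || maxA[axis] < minB[axis])
			return false;
	return true;
}

// Cache-friendly query: reject whole subtrees via their header AABB, then walk only
// the contiguous slice [rootNodeIndex, rootNodeIndex + subtreeSize) stacklessly,
// using the escape index to skip the descendants of rejected internal nodes.
static void querySubtreesSketch(const std::vector<QuantizedNodeSketch>& nodes,
                                const std::vector<SubtreeInfoSketch>& subtrees,
                                const unsigned short* queryMin, const unsigned short* queryMax)
{
	for (size_t s = 0; s < subtrees.size(); ++s)
	{
		const SubtreeInfoSketch& sub = subtrees[s];
		if (!overlaps(sub.quantizedAabbMin, sub.quantizedAabbMax, queryMin, queryMax))
			continue;                 // whole subtree rejected without touching its nodes

		int cur = sub.rootNodeIndex;
		const int end = sub.rootNodeIndex + sub.subtreeSize;
		while (cur < end)
		{
			const QuantizedNodeSketch& node = nodes[cur];
			const bool overlap = overlaps(node.quantizedAabbMin, node.quantizedAabbMax,
			                              queryMin, queryMax);
			const bool isLeaf = node.escapeIndexOrTriangleIndex >= 0;
			if (overlap && isLeaf)
				std::printf("hit triangle %d\n", node.escapeIndexOrTriangleIndex);
			if (overlap || isLeaf)
				++cur;                                    // step into / past this node
			else
				cur += -node.escapeIndexOrTriangleIndex;  // skip the rejected node's subtree
		}
	}
}
```

Because each slice is contiguous and bounded in size, the inner loop only ever touches one small block of nodes at a time, which is what makes the layout friendly to a DMA-fed SPU as well as to CPU caches.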
@@ -211,13 +250,17 @@ protected:

	void walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;

	void walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;
	void walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const;

	///tree traversal designed for small-memory processors like PS3 SPU
	void walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA,const btQuantizedBvhNode* treeNodeB,btNodeOverlapCallback* nodeCallback) const;


	inline bool testQuantizedAabbAgainstQuantizedAabb(unsigned short int* aabbMin1,unsigned short int* aabbMax1,const unsigned short int* aabbMin2,const unsigned short int* aabbMax2) const
	{
@@ -228,6 +271,7 @@ protected:
		return overlap;
	}

	void updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex);

public:
	btOptimizedBvh();
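The comments two hunks above describe the new recursive walkers as running over the same 16-byte 'skipindex' node array. A plausible reading of how children are located in that flat layout, not taken from the actual .cpp in this commit: the left child of an internal node is simply the next node in the array, and the right child starts right after the left child's subtree, whose size is 1 for a leaf and its escape index otherwise. The sketch below reuses QuantizedNodeSketch and overlaps() from the previous sketch; walkRecursiveSketch is a hypothetical name.

```cpp
// Recursive walk over the flat skip-index array (see the previous sketch for the
// QuantizedNodeSketch type and the overlaps() helper).
static void walkRecursiveSketch(const QuantizedNodeSketch* node,
                                const unsigned short* queryMin, const unsigned short* queryMax)
{
	if (!overlaps(node->quantizedAabbMin, node->quantizedAabbMax, queryMin, queryMax))
		return;                                   // prune this node and everything below it

	if (node->escapeIndexOrTriangleIndex >= 0)
	{
		// leaf: escapeIndexOrTriangleIndex is the triangle index; report it here
		return;
	}

	const QuantizedNodeSketch* leftChild = node + 1;      // left child is the next node
	const int leftSubtreeSize = (leftChild->escapeIndexOrTriangleIndex >= 0)
	                                ? 1                   // leaf occupies one slot
	                                : -leftChild->escapeIndexOrTriangleIndex;
	const QuantizedNodeSketch* rightChild = leftChild + leftSubtreeSize;

	walkRecursiveSketch(leftChild, queryMin, queryMax);
	walkRecursiveSketch(rightChild, queryMin, queryMax);
}
```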
@@ -244,6 +288,12 @@ public:

	btVector3 unQuantize(const unsigned short* vecIn) const;

	///setTraversalMode lets you choose between stackless, recursive, or stackless cache-friendly tree traversal. Note this is only implemented for quantized trees.
	void setTraversalMode(btTraversalMode traversalMode)
	{
		m_traversalMode = traversalMode;
	}

	void refit(btStridingMeshInterface* triangles);
};
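A minimal usage sketch for the accessor added in the hunk above. It assumes the btTraversalMode values are reachable from the calling code in this revision (the diff does not show which access section declares them) and that the include path is adjusted to wherever btOptimizedBvh.h lives in the tree; everything else is the unchanged btOptimizedBvh API.

```cpp
#include "btOptimizedBvh.h"   // adjust path to the Bullet collision shapes directory

// Sketch: opt an existing quantized tree into the new cache-friendly traversal.
void useCacheFriendlyTraversal(btOptimizedBvh& bvh)
{
	bvh.setTraversalMode(btOptimizedBvh::TRAVERSAL_STACKLESS_CACHE_FRIENDLY);
	// Subsequent AABB queries against the quantized tree should then be served
	// subtree-by-subtree, presumably via walkStacklessQuantizedTreeCacheFriendly().
}
```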