bt -> b3 and BT -> B3 rename for content and filenames
This commit is contained in:
@@ -19,41 +19,41 @@ int gNumAlignedAllocs = 0;
|
||||
int gNumAlignedFree = 0;
|
||||
int gTotalBytesAlignedAllocs = 0;//detect memory leaks
|
||||
|
||||
static void *btAllocDefault(size_t size)
|
||||
static void *b3AllocDefault(size_t size)
|
||||
{
|
||||
return malloc(size);
|
||||
}
|
||||
|
||||
static void btFreeDefault(void *ptr)
|
||||
static void b3FreeDefault(void *ptr)
|
||||
{
|
||||
free(ptr);
|
||||
}
|
||||
|
||||
static btAllocFunc *sAllocFunc = btAllocDefault;
|
||||
static btFreeFunc *sFreeFunc = btFreeDefault;
|
||||
static b3AllocFunc *sAllocFunc = b3AllocDefault;
|
||||
static b3FreeFunc *sFreeFunc = b3FreeDefault;
|
||||
|
||||
|
||||
|
||||
#if defined (BT_HAS_ALIGNED_ALLOCATOR)
|
||||
#if defined (B3_HAS_ALIGNED_ALLOCATOR)
|
||||
#include <malloc.h>
|
||||
static void *btAlignedAllocDefault(size_t size, int alignment)
|
||||
static void *b3AlignedAllocDefault(size_t size, int alignment)
|
||||
{
|
||||
return _aligned_malloc(size, (size_t)alignment);
|
||||
}
|
||||
|
||||
static void btAlignedFreeDefault(void *ptr)
|
||||
static void b3AlignedFreeDefault(void *ptr)
|
||||
{
|
||||
_aligned_free(ptr);
|
||||
}
|
||||
#elif defined(__CELLOS_LV2__)
|
||||
#include <stdlib.h>
|
||||
|
||||
static inline void *btAlignedAllocDefault(size_t size, int alignment)
|
||||
static inline void *b3AlignedAllocDefault(size_t size, int alignment)
|
||||
{
|
||||
return memalign(alignment, size);
|
||||
}
|
||||
|
||||
static inline void btAlignedFreeDefault(void *ptr)
|
||||
static inline void b3AlignedFreeDefault(void *ptr)
|
||||
{
|
||||
free(ptr);
|
||||
}
|
||||
@@ -63,13 +63,13 @@ static inline void btAlignedFreeDefault(void *ptr)
|
||||
|
||||
|
||||
|
||||
static inline void *btAlignedAllocDefault(size_t size, int alignment)
|
||||
static inline void *b3AlignedAllocDefault(size_t size, int alignment)
|
||||
{
|
||||
void *ret;
|
||||
char *real;
|
||||
real = (char *)sAllocFunc(size + sizeof(void *) + (alignment-1));
|
||||
if (real) {
|
||||
ret = btAlignPointer(real + sizeof(void *),alignment);
|
||||
ret = b3AlignPointer(real + sizeof(void *),alignment);
|
||||
*((void **)(ret)-1) = (void *)(real);
|
||||
} else {
|
||||
ret = (void *)(real);
|
||||
@@ -77,7 +77,7 @@ static inline void *btAlignedAllocDefault(size_t size, int alignment)
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static inline void btAlignedFreeDefault(void *ptr)
|
||||
static inline void b3AlignedFreeDefault(void *ptr)
|
||||
{
|
||||
void* real;
|
||||
|
||||
@@ -89,26 +89,26 @@ static inline void btAlignedFreeDefault(void *ptr)
|
||||
#endif
|
||||
|
||||
|
||||
static btAlignedAllocFunc *sAlignedAllocFunc = btAlignedAllocDefault;
|
||||
static btAlignedFreeFunc *sAlignedFreeFunc = btAlignedFreeDefault;
|
||||
static b3AlignedAllocFunc *sAlignedAllocFunc = b3AlignedAllocDefault;
|
||||
static b3AlignedFreeFunc *sAlignedFreeFunc = b3AlignedFreeDefault;
|
||||
|
||||
void btAlignedAllocSetCustomAligned(btAlignedAllocFunc *allocFunc, btAlignedFreeFunc *freeFunc)
|
||||
void b3AlignedAllocSetCustomAligned(b3AlignedAllocFunc *allocFunc, b3AlignedFreeFunc *freeFunc)
|
||||
{
|
||||
sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault;
|
||||
sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault;
|
||||
sAlignedAllocFunc = allocFunc ? allocFunc : b3AlignedAllocDefault;
|
||||
sAlignedFreeFunc = freeFunc ? freeFunc : b3AlignedFreeDefault;
|
||||
}
|
||||
|
||||
void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc)
|
||||
void b3AlignedAllocSetCustom(b3AllocFunc *allocFunc, b3FreeFunc *freeFunc)
|
||||
{
|
||||
sAllocFunc = allocFunc ? allocFunc : btAllocDefault;
|
||||
sFreeFunc = freeFunc ? freeFunc : btFreeDefault;
|
||||
sAllocFunc = allocFunc ? allocFunc : b3AllocDefault;
|
||||
sFreeFunc = freeFunc ? freeFunc : b3FreeDefault;
|
||||
}
|
||||
|
||||
#ifdef BT_DEBUG_MEMORY_ALLOCATIONS
|
||||
#ifdef B3_DEBUG_MEMORY_ALLOCATIONS
|
||||
//this generic allocator provides the total allocated number of bytes
|
||||
#include <stdio.h>
|
||||
|
||||
void* btAlignedAllocInternal (size_t size, int alignment,int line,char* filename)
|
||||
void* b3AlignedAllocInternal (size_t size, int alignment,int line,char* filename)
|
||||
{
|
||||
void *ret;
|
||||
char *real;
|
||||
@@ -119,7 +119,7 @@ void* btAlignedAllocInternal (size_t size, int alignment,int line,char* filen
|
||||
|
||||
real = (char *)sAllocFunc(size + 2*sizeof(void *) + (alignment-1));
|
||||
if (real) {
|
||||
ret = (void*) btAlignPointer(real + 2*sizeof(void *), alignment);
|
||||
ret = (void*) b3AlignPointer(real + 2*sizeof(void *), alignment);
|
||||
*((void **)(ret)-1) = (void *)(real);
|
||||
*((int*)(ret)-2) = size;
|
||||
|
||||
@@ -134,7 +134,7 @@ void* btAlignedAllocInternal (size_t size, int alignment,int line,char* filen
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void btAlignedFreeInternal (void* ptr,int line,char* filename)
|
||||
void b3AlignedFreeInternal (void* ptr,int line,char* filename)
|
||||
{
|
||||
|
||||
void* real;
|
||||
@@ -154,18 +154,18 @@ void btAlignedFreeInternal (void* ptr,int line,char* filename)
|
||||
}
|
||||
}
|
||||
|
||||
#else //BT_DEBUG_MEMORY_ALLOCATIONS
|
||||
#else //B3_DEBUG_MEMORY_ALLOCATIONS
|
||||
|
||||
void* btAlignedAllocInternal (size_t size, int alignment)
|
||||
void* b3AlignedAllocInternal (size_t size, int alignment)
|
||||
{
|
||||
gNumAlignedAllocs++;
|
||||
void* ptr;
|
||||
ptr = sAlignedAllocFunc(size, alignment);
|
||||
// printf("btAlignedAllocInternal %d, %x\n",size,ptr);
|
||||
// printf("b3AlignedAllocInternal %d, %x\n",size,ptr);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void btAlignedFreeInternal (void* ptr)
|
||||
void b3AlignedFreeInternal (void* ptr)
|
||||
{
|
||||
if (!ptr)
|
||||
{
|
||||
@@ -173,9 +173,9 @@ void btAlignedFreeInternal (void* ptr)
|
||||
}
|
||||
|
||||
gNumAlignedFree++;
|
||||
// printf("btAlignedFreeInternal %x\n",ptr);
|
||||
// printf("b3AlignedFreeInternal %x\n",ptr);
|
||||
sAlignedFreeFunc(ptr);
|
||||
}
|
||||
|
||||
#endif //BT_DEBUG_MEMORY_ALLOCATIONS
|
||||
#endif //B3_DEBUG_MEMORY_ALLOCATIONS
|
||||
|
||||
|
||||
@@ -13,50 +13,50 @@ subject to the following restrictions:
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
*/
|
||||
|
||||
#ifndef BT_ALIGNED_ALLOCATOR
|
||||
#define BT_ALIGNED_ALLOCATOR
|
||||
#ifndef B3_ALIGNED_ALLOCATOR
|
||||
#define B3_ALIGNED_ALLOCATOR
|
||||
|
||||
///we probably replace this with our own aligned memory allocator
|
||||
///so we replace _aligned_malloc and _aligned_free with our own
|
||||
///that is better portable and more predictable
|
||||
|
||||
#include "b3Scalar.h"
|
||||
//#define BT_DEBUG_MEMORY_ALLOCATIONS 1
|
||||
#ifdef BT_DEBUG_MEMORY_ALLOCATIONS
|
||||
//#define B3_DEBUG_MEMORY_ALLOCATIONS 1
|
||||
#ifdef B3_DEBUG_MEMORY_ALLOCATIONS
|
||||
|
||||
#define btAlignedAlloc(a,b) \
|
||||
btAlignedAllocInternal(a,b,__LINE__,__FILE__)
|
||||
#define b3AlignedAlloc(a,b) \
|
||||
b3AlignedAllocInternal(a,b,__LINE__,__FILE__)
|
||||
|
||||
#define btAlignedFree(ptr) \
|
||||
btAlignedFreeInternal(ptr,__LINE__,__FILE__)
|
||||
#define b3AlignedFree(ptr) \
|
||||
b3AlignedFreeInternal(ptr,__LINE__,__FILE__)
|
||||
|
||||
void* btAlignedAllocInternal (size_t size, int alignment,int line,char* filename);
|
||||
void* b3AlignedAllocInternal (size_t size, int alignment,int line,char* filename);
|
||||
|
||||
void btAlignedFreeInternal (void* ptr,int line,char* filename);
|
||||
void b3AlignedFreeInternal (void* ptr,int line,char* filename);
|
||||
|
||||
#else
|
||||
void* btAlignedAllocInternal (size_t size, int alignment);
|
||||
void btAlignedFreeInternal (void* ptr);
|
||||
void* b3AlignedAllocInternal (size_t size, int alignment);
|
||||
void b3AlignedFreeInternal (void* ptr);
|
||||
|
||||
#define btAlignedAlloc(size,alignment) btAlignedAllocInternal(size,alignment)
|
||||
#define btAlignedFree(ptr) btAlignedFreeInternal(ptr)
|
||||
#define b3AlignedAlloc(size,alignment) b3AlignedAllocInternal(size,alignment)
|
||||
#define b3AlignedFree(ptr) b3AlignedFreeInternal(ptr)
|
||||
|
||||
#endif
|
||||
typedef int size_type;
|
||||
|
||||
typedef void *(btAlignedAllocFunc)(size_t size, int alignment);
|
||||
typedef void (btAlignedFreeFunc)(void *memblock);
|
||||
typedef void *(btAllocFunc)(size_t size);
|
||||
typedef void (btFreeFunc)(void *memblock);
|
||||
typedef void *(b3AlignedAllocFunc)(size_t size, int alignment);
|
||||
typedef void (b3AlignedFreeFunc)(void *memblock);
|
||||
typedef void *(b3AllocFunc)(size_t size);
|
||||
typedef void (b3FreeFunc)(void *memblock);
|
||||
|
||||
///The developer can let all Bullet memory allocations go through a custom memory allocator, using btAlignedAllocSetCustom
|
||||
void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc);
|
||||
///If the developer has already an custom aligned allocator, then btAlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it.
|
||||
void btAlignedAllocSetCustomAligned(btAlignedAllocFunc *allocFunc, btAlignedFreeFunc *freeFunc);
|
||||
///The developer can let all Bullet memory allocations go through a custom memory allocator, using b3AlignedAllocSetCustom
|
||||
void b3AlignedAllocSetCustom(b3AllocFunc *allocFunc, b3FreeFunc *freeFunc);
|
||||
///If the developer has already an custom aligned allocator, then b3AlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it.
|
||||
void b3AlignedAllocSetCustomAligned(b3AlignedAllocFunc *allocFunc, b3AlignedFreeFunc *freeFunc);
|
||||
|
||||
|
||||
///The b3AlignedAllocator is a portable class for aligned memory allocations.
|
||||
///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using btAlignedAllocSetCustom and btAlignedAllocSetCustomAligned.
|
||||
///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using b3AlignedAllocSetCustom and b3AlignedAllocSetCustomAligned.
|
||||
template < typename T , unsigned Alignment >
|
||||
class b3AlignedAllocator {
|
||||
|
||||
@@ -83,11 +83,11 @@ public:
|
||||
const_pointer address ( const_reference ref ) const { return &ref; }
|
||||
pointer allocate ( size_type n , const_pointer * hint = 0 ) {
|
||||
(void)hint;
|
||||
return reinterpret_cast< pointer >(btAlignedAlloc( sizeof(value_type) * n , Alignment ));
|
||||
return reinterpret_cast< pointer >(b3AlignedAlloc( sizeof(value_type) * n , Alignment ));
|
||||
}
|
||||
void construct ( pointer ptr , const value_type & value ) { new (ptr) value_type( value ); }
|
||||
void deallocate( pointer ptr ) {
|
||||
btAlignedFree( reinterpret_cast< void * >( ptr ) );
|
||||
b3AlignedFree( reinterpret_cast< void * >( ptr ) );
|
||||
}
|
||||
void destroy ( pointer ptr ) { ptr->~value_type(); }
|
||||
|
||||
@@ -103,5 +103,5 @@ public:
|
||||
|
||||
|
||||
|
||||
#endif //BT_ALIGNED_ALLOCATOR
|
||||
#endif //B3_ALIGNED_ALLOCATOR
|
||||
|
||||
|
||||
@@ -14,30 +14,30 @@ subject to the following restrictions:
|
||||
*/
|
||||
|
||||
|
||||
#ifndef BT_OBJECT_ARRAY__
|
||||
#define BT_OBJECT_ARRAY__
|
||||
#ifndef B3_OBJECT_ARRAY__
|
||||
#define B3_OBJECT_ARRAY__
|
||||
|
||||
#include "b3Scalar.h" // has definitions like SIMD_FORCE_INLINE
|
||||
#include "b3AlignedAllocator.h"
|
||||
|
||||
///If the platform doesn't support placement new, you can disable BT_USE_PLACEMENT_NEW
|
||||
///If the platform doesn't support placement new, you can disable B3_USE_PLACEMENT_NEW
|
||||
///then the b3AlignedObjectArray doesn't support objects with virtual methods, and non-trivial constructors/destructors
|
||||
///You can enable BT_USE_MEMCPY, then swapping elements in the array will use memcpy instead of operator=
|
||||
///You can enable B3_USE_MEMCPY, then swapping elements in the array will use memcpy instead of operator=
|
||||
///see discussion here: http://continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1231 and
|
||||
///http://www.continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1240
|
||||
|
||||
#define BT_USE_PLACEMENT_NEW 1
|
||||
//#define BT_USE_MEMCPY 1 //disable, because it is cumbersome to find out for each platform where memcpy is defined. It can be in <memory.h> or <string.h> or otherwise...
|
||||
#define BT_ALLOW_ARRAY_COPY_OPERATOR // enabling this can accidently perform deep copies of data if you are not careful
|
||||
#define B3_USE_PLACEMENT_NEW 1
|
||||
//#define B3_USE_MEMCPY 1 //disable, because it is cumbersome to find out for each platform where memcpy is defined. It can be in <memory.h> or <string.h> or otherwise...
|
||||
#define B3_ALLOW_ARRAY_COPY_OPERATOR // enabling this can accidently perform deep copies of data if you are not careful
|
||||
|
||||
#ifdef BT_USE_MEMCPY
|
||||
#ifdef B3_USE_MEMCPY
|
||||
#include <memory.h>
|
||||
#include <string.h>
|
||||
#endif //BT_USE_MEMCPY
|
||||
#endif //B3_USE_MEMCPY
|
||||
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
#ifdef B3_USE_PLACEMENT_NEW
|
||||
#include <new> //for placement new
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
#endif //B3_USE_PLACEMENT_NEW
|
||||
|
||||
|
||||
///The b3AlignedObjectArray template class uses a subset of the stl::vector interface for its methods
|
||||
@@ -54,17 +54,17 @@ class b3AlignedObjectArray
|
||||
//PCK: added this line
|
||||
bool m_ownsMemory;
|
||||
|
||||
#ifdef BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
#ifdef B3_ALLOW_ARRAY_COPY_OPERATOR
|
||||
public:
|
||||
SIMD_FORCE_INLINE b3AlignedObjectArray<T>& operator=(const b3AlignedObjectArray<T> &other)
|
||||
{
|
||||
copyFromArray(other);
|
||||
return *this;
|
||||
}
|
||||
#else//BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
#else//B3_ALLOW_ARRAY_COPY_OPERATOR
|
||||
private:
|
||||
SIMD_FORCE_INLINE b3AlignedObjectArray<T>& operator=(const b3AlignedObjectArray<T> &other);
|
||||
#endif//BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
#endif//B3_ALLOW_ARRAY_COPY_OPERATOR
|
||||
|
||||
protected:
|
||||
SIMD_FORCE_INLINE int allocSize(int size)
|
||||
@@ -75,11 +75,11 @@ protected:
|
||||
{
|
||||
int i;
|
||||
for (i=start;i<end;++i)
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
#ifdef B3_USE_PLACEMENT_NEW
|
||||
new (&dest[i]) T(m_data[i]);
|
||||
#else
|
||||
dest[i] = m_data[i];
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
#endif //B3_USE_PLACEMENT_NEW
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void init()
|
||||
@@ -153,29 +153,29 @@ protected:
|
||||
|
||||
SIMD_FORCE_INLINE const T& at(int n) const
|
||||
{
|
||||
btAssert(n>=0);
|
||||
btAssert(n<size());
|
||||
b3Assert(n>=0);
|
||||
b3Assert(n<size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T& at(int n)
|
||||
{
|
||||
btAssert(n>=0);
|
||||
btAssert(n<size());
|
||||
b3Assert(n>=0);
|
||||
b3Assert(n<size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE const T& operator[](int n) const
|
||||
{
|
||||
btAssert(n>=0);
|
||||
btAssert(n<size());
|
||||
b3Assert(n>=0);
|
||||
b3Assert(n<size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T& operator[](int n)
|
||||
{
|
||||
btAssert(n>=0);
|
||||
btAssert(n<size());
|
||||
b3Assert(n>=0);
|
||||
b3Assert(n<size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
@@ -192,7 +192,7 @@ protected:
|
||||
|
||||
SIMD_FORCE_INLINE void pop_back()
|
||||
{
|
||||
btAssert(m_size>0);
|
||||
b3Assert(m_size>0);
|
||||
m_size--;
|
||||
m_data[m_size].~T();
|
||||
}
|
||||
@@ -233,12 +233,12 @@ protected:
|
||||
{
|
||||
reserve(newsize);
|
||||
}
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
#ifdef B3_USE_PLACEMENT_NEW
|
||||
for (int i=curSize;i<newsize;i++)
|
||||
{
|
||||
new ( &m_data[i]) T(fillData);
|
||||
}
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
#endif //B3_USE_PLACEMENT_NEW
|
||||
|
||||
}
|
||||
|
||||
@@ -265,7 +265,7 @@ protected:
|
||||
reserve( allocSize(size()) );
|
||||
}
|
||||
m_size++;
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
#ifdef B3_USE_PLACEMENT_NEW
|
||||
new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory)
|
||||
#endif
|
||||
|
||||
@@ -281,11 +281,11 @@ protected:
|
||||
reserve( allocSize(size()) );
|
||||
}
|
||||
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
#ifdef B3_USE_PLACEMENT_NEW
|
||||
new ( &m_data[m_size] ) T(_Val);
|
||||
#else
|
||||
m_data[size()] = _Val;
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
#endif //B3_USE_PLACEMENT_NEW
|
||||
|
||||
m_size++;
|
||||
}
|
||||
@@ -302,7 +302,7 @@ protected:
|
||||
if (capacity() < _Count)
|
||||
{ // not enough room, reallocate
|
||||
T* s = (T*)allocate(_Count);
|
||||
btAssert(s);
|
||||
b3Assert(s);
|
||||
|
||||
copy(0, size(), s);
|
||||
|
||||
@@ -407,7 +407,7 @@ protected:
|
||||
|
||||
void swap(int index0,int index1)
|
||||
{
|
||||
#ifdef BT_USE_MEMCPY
|
||||
#ifdef B3_USE_MEMCPY
|
||||
char temp[sizeof(T)];
|
||||
memcpy(temp,&m_data[index0],sizeof(T));
|
||||
memcpy(&m_data[index0],&m_data[index1],sizeof(T));
|
||||
@@ -416,7 +416,7 @@ protected:
|
||||
T temp = m_data[index0];
|
||||
m_data[index0] = m_data[index1];
|
||||
m_data[index1] = temp;
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
#endif //B3_USE_PLACEMENT_NEW
|
||||
|
||||
}
|
||||
|
||||
@@ -509,4 +509,4 @@ protected:
|
||||
|
||||
};
|
||||
|
||||
#endif //BT_OBJECT_ARRAY__
|
||||
#endif //B3_OBJECT_ARRAY__
|
||||
|
||||
@@ -14,13 +14,13 @@ subject to the following restrictions:
|
||||
*/
|
||||
|
||||
|
||||
#ifndef BT_HASH_MAP_H
|
||||
#define BT_HASH_MAP_H
|
||||
#ifndef B3_HASH_MAP_H
|
||||
#define B3_HASH_MAP_H
|
||||
|
||||
#include "b3AlignedObjectArray.h"
|
||||
|
||||
///very basic hashable string implementation, compatible with b3HashMap
|
||||
struct btHashString
|
||||
struct b3HashString
|
||||
{
|
||||
const char* m_string;
|
||||
unsigned int m_hash;
|
||||
@@ -30,7 +30,7 @@ struct btHashString
|
||||
return m_hash;
|
||||
}
|
||||
|
||||
btHashString(const char* name)
|
||||
b3HashString(const char* name)
|
||||
:m_string(name)
|
||||
{
|
||||
/* magic numbers from http://www.isthe.com/chongo/tech/comp/fnv/ */
|
||||
@@ -63,7 +63,7 @@ struct btHashString
|
||||
return( ret );
|
||||
}
|
||||
|
||||
bool equals(const btHashString& other) const
|
||||
bool equals(const b3HashString& other) const
|
||||
{
|
||||
return (m_string == other.m_string) ||
|
||||
(0==portableStringCompare(m_string,other.m_string));
|
||||
@@ -72,14 +72,14 @@ struct btHashString
|
||||
|
||||
};
|
||||
|
||||
const int BT_HASH_NULL=0xffffffff;
|
||||
const int B3_HASH_NULL=0xffffffff;
|
||||
|
||||
|
||||
class btHashInt
|
||||
class b3HashInt
|
||||
{
|
||||
int m_uid;
|
||||
public:
|
||||
btHashInt(int uid) :m_uid(uid)
|
||||
b3HashInt(int uid) :m_uid(uid)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -93,7 +93,7 @@ public:
|
||||
m_uid = uid;
|
||||
}
|
||||
|
||||
bool equals(const btHashInt& other) const
|
||||
bool equals(const b3HashInt& other) const
|
||||
{
|
||||
return getUid1() == other.getUid1();
|
||||
}
|
||||
@@ -109,7 +109,7 @@ public:
|
||||
|
||||
|
||||
|
||||
class btHashPtr
|
||||
class b3HashPtr
|
||||
{
|
||||
|
||||
union
|
||||
@@ -120,7 +120,7 @@ class btHashPtr
|
||||
|
||||
public:
|
||||
|
||||
btHashPtr(const void* ptr)
|
||||
b3HashPtr(const void* ptr)
|
||||
:m_pointer(ptr)
|
||||
{
|
||||
}
|
||||
@@ -130,7 +130,7 @@ public:
|
||||
return m_pointer;
|
||||
}
|
||||
|
||||
bool equals(const btHashPtr& other) const
|
||||
bool equals(const b3HashPtr& other) const
|
||||
{
|
||||
return getPointer() == other.getPointer();
|
||||
}
|
||||
@@ -152,12 +152,12 @@ public:
|
||||
|
||||
|
||||
template <class Value>
|
||||
class btHashKeyPtr
|
||||
class b3HashKeyPtr
|
||||
{
|
||||
int m_uid;
|
||||
public:
|
||||
|
||||
btHashKeyPtr(int uid) :m_uid(uid)
|
||||
b3HashKeyPtr(int uid) :m_uid(uid)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -166,7 +166,7 @@ public:
|
||||
return m_uid;
|
||||
}
|
||||
|
||||
bool equals(const btHashKeyPtr<Value>& other) const
|
||||
bool equals(const b3HashKeyPtr<Value>& other) const
|
||||
{
|
||||
return getUid1() == other.getUid1();
|
||||
}
|
||||
@@ -185,12 +185,12 @@ public:
|
||||
|
||||
|
||||
template <class Value>
|
||||
class btHashKey
|
||||
class b3HashKey
|
||||
{
|
||||
int m_uid;
|
||||
public:
|
||||
|
||||
btHashKey(int uid) :m_uid(uid)
|
||||
b3HashKey(int uid) :m_uid(uid)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -199,7 +199,7 @@ public:
|
||||
return m_uid;
|
||||
}
|
||||
|
||||
bool equals(const btHashKey<Value>& other) const
|
||||
bool equals(const b3HashKey<Value>& other) const
|
||||
{
|
||||
return getUid1() == other.getUid1();
|
||||
}
|
||||
@@ -243,11 +243,11 @@ protected:
|
||||
|
||||
for (i= 0; i < newCapacity; ++i)
|
||||
{
|
||||
m_hashTable[i] = BT_HASH_NULL;
|
||||
m_hashTable[i] = B3_HASH_NULL;
|
||||
}
|
||||
for (i = 0; i < newCapacity; ++i)
|
||||
{
|
||||
m_next[i] = BT_HASH_NULL;
|
||||
m_next[i] = B3_HASH_NULL;
|
||||
}
|
||||
|
||||
for(i=0;i<curHashtableSize;i++)
|
||||
@@ -271,7 +271,7 @@ protected:
|
||||
|
||||
//replace value if the key is already there
|
||||
int index = findIndex(key);
|
||||
if (index != BT_HASH_NULL)
|
||||
if (index != B3_HASH_NULL)
|
||||
{
|
||||
m_valueArray[index]=value;
|
||||
return;
|
||||
@@ -299,25 +299,25 @@ protected:
|
||||
|
||||
int pairIndex = findIndex(key);
|
||||
|
||||
if (pairIndex ==BT_HASH_NULL)
|
||||
if (pairIndex ==B3_HASH_NULL)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// Remove the pair from the hash table.
|
||||
int index = m_hashTable[hash];
|
||||
btAssert(index != BT_HASH_NULL);
|
||||
b3Assert(index != B3_HASH_NULL);
|
||||
|
||||
int previous = BT_HASH_NULL;
|
||||
int previous = B3_HASH_NULL;
|
||||
while (index != pairIndex)
|
||||
{
|
||||
previous = index;
|
||||
index = m_next[index];
|
||||
}
|
||||
|
||||
if (previous != BT_HASH_NULL)
|
||||
if (previous != B3_HASH_NULL)
|
||||
{
|
||||
btAssert(m_next[previous] == pairIndex);
|
||||
b3Assert(m_next[previous] == pairIndex);
|
||||
m_next[previous] = m_next[pairIndex];
|
||||
}
|
||||
else
|
||||
@@ -343,18 +343,18 @@ protected:
|
||||
int lastHash = m_keyArray[lastPairIndex].getHash() & (m_valueArray.capacity()-1);
|
||||
|
||||
index = m_hashTable[lastHash];
|
||||
btAssert(index != BT_HASH_NULL);
|
||||
b3Assert(index != B3_HASH_NULL);
|
||||
|
||||
previous = BT_HASH_NULL;
|
||||
previous = B3_HASH_NULL;
|
||||
while (index != lastPairIndex)
|
||||
{
|
||||
previous = index;
|
||||
index = m_next[index];
|
||||
}
|
||||
|
||||
if (previous != BT_HASH_NULL)
|
||||
if (previous != B3_HASH_NULL)
|
||||
{
|
||||
btAssert(m_next[previous] == lastPairIndex);
|
||||
b3Assert(m_next[previous] == lastPairIndex);
|
||||
m_next[previous] = m_next[lastPairIndex];
|
||||
}
|
||||
else
|
||||
@@ -383,14 +383,14 @@ protected:
|
||||
|
||||
const Value* getAtIndex(int index) const
|
||||
{
|
||||
btAssert(index < m_valueArray.size());
|
||||
b3Assert(index < m_valueArray.size());
|
||||
|
||||
return &m_valueArray[index];
|
||||
}
|
||||
|
||||
Value* getAtIndex(int index)
|
||||
{
|
||||
btAssert(index < m_valueArray.size());
|
||||
b3Assert(index < m_valueArray.size());
|
||||
|
||||
return &m_valueArray[index];
|
||||
}
|
||||
@@ -402,7 +402,7 @@ protected:
|
||||
const Value* find(const Key& key) const
|
||||
{
|
||||
int index = findIndex(key);
|
||||
if (index == BT_HASH_NULL)
|
||||
if (index == B3_HASH_NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
@@ -412,7 +412,7 @@ protected:
|
||||
Value* find(const Key& key)
|
||||
{
|
||||
int index = findIndex(key);
|
||||
if (index == BT_HASH_NULL)
|
||||
if (index == B3_HASH_NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
@@ -426,11 +426,11 @@ protected:
|
||||
|
||||
if (hash >= (unsigned int)m_hashTable.size())
|
||||
{
|
||||
return BT_HASH_NULL;
|
||||
return B3_HASH_NULL;
|
||||
}
|
||||
|
||||
int index = m_hashTable[hash];
|
||||
while ((index != BT_HASH_NULL) && key.equals(m_keyArray[index]) == false)
|
||||
while ((index != B3_HASH_NULL) && key.equals(m_keyArray[index]) == false)
|
||||
{
|
||||
index = m_next[index];
|
||||
}
|
||||
@@ -447,4 +447,4 @@ protected:
|
||||
|
||||
};
|
||||
|
||||
#endif //BT_HASH_MAP_H
|
||||
#endif //B3_HASH_MAP_H
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#ifndef BT_INT2_H
|
||||
#define BT_INT2_H
|
||||
#ifndef B3_INT2_H
|
||||
#define B3_INT2_H
|
||||
|
||||
struct btUnsignedInt2
|
||||
struct b3UnsignedInt2
|
||||
{
|
||||
union
|
||||
{
|
||||
@@ -16,7 +16,7 @@ struct btUnsignedInt2
|
||||
};
|
||||
};
|
||||
|
||||
struct btInt2
|
||||
struct b3Int2
|
||||
{
|
||||
union
|
||||
{
|
||||
@@ -13,29 +13,29 @@ subject to the following restrictions:
|
||||
*/
|
||||
|
||||
|
||||
#ifndef BT_MATRIX3x3_H
|
||||
#define BT_MATRIX3x3_H
|
||||
#ifndef B3_MATRIX3x3_H
|
||||
#define B3_MATRIX3x3_H
|
||||
|
||||
#include "b3Vector3.h"
|
||||
#include "b3Quaternion.h"
|
||||
#include <stdio.h>
|
||||
|
||||
#ifdef BT_USE_SSE
|
||||
#ifdef B3_USE_SSE
|
||||
//const __m128 ATTRIBUTE_ALIGNED16(v2220) = {2.0f, 2.0f, 2.0f, 0.0f};
|
||||
const __m128 ATTRIBUTE_ALIGNED16(vMPPP) = {-0.0f, +0.0f, +0.0f, +0.0f};
|
||||
#endif
|
||||
|
||||
#if defined(BT_USE_SSE) || defined(BT_USE_NEON)
|
||||
const btSimdFloat4 ATTRIBUTE_ALIGNED16(v1000) = {1.0f, 0.0f, 0.0f, 0.0f};
|
||||
const btSimdFloat4 ATTRIBUTE_ALIGNED16(v0100) = {0.0f, 1.0f, 0.0f, 0.0f};
|
||||
const btSimdFloat4 ATTRIBUTE_ALIGNED16(v0010) = {0.0f, 0.0f, 1.0f, 0.0f};
|
||||
#if defined(B3_USE_SSE) || defined(B3_USE_NEON)
|
||||
const b3SimdFloat4 ATTRIBUTE_ALIGNED16(v1000) = {1.0f, 0.0f, 0.0f, 0.0f};
|
||||
const b3SimdFloat4 ATTRIBUTE_ALIGNED16(v0100) = {0.0f, 1.0f, 0.0f, 0.0f};
|
||||
const b3SimdFloat4 ATTRIBUTE_ALIGNED16(v0010) = {0.0f, 0.0f, 1.0f, 0.0f};
|
||||
#endif
|
||||
|
||||
#ifdef BT_USE_DOUBLE_PRECISION
|
||||
#define btMatrix3x3Data btMatrix3x3DoubleData
|
||||
#ifdef B3_USE_DOUBLE_PRECISION
|
||||
#define b3Matrix3x3Data b3Matrix3x3DoubleData
|
||||
#else
|
||||
#define btMatrix3x3Data btMatrix3x3FloatData
|
||||
#endif //BT_USE_DOUBLE_PRECISION
|
||||
#define b3Matrix3x3Data b3Matrix3x3FloatData
|
||||
#endif //B3_USE_DOUBLE_PRECISION
|
||||
|
||||
|
||||
/**@brief The b3Matrix3x3 class implements a 3x3 rotation matrix, to perform linear algebra in combination with b3Quaternion, b3Transform and b3Vector3.
|
||||
@@ -70,8 +70,8 @@ public:
|
||||
zx, zy, zz);
|
||||
}
|
||||
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
SIMD_FORCE_INLINE b3Matrix3x3 (const btSimdFloat4 v0, const btSimdFloat4 v1, const btSimdFloat4 v2 )
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
SIMD_FORCE_INLINE b3Matrix3x3 (const b3SimdFloat4 v0, const b3SimdFloat4 v1, const b3SimdFloat4 v2 )
|
||||
{
|
||||
m_el[0].mVec128 = v0;
|
||||
m_el[1].mVec128 = v1;
|
||||
@@ -136,7 +136,7 @@ public:
|
||||
* @param i Row number 0 indexed */
|
||||
SIMD_FORCE_INLINE const b3Vector3& getRow(int i) const
|
||||
{
|
||||
btFullAssert(0 <= i && i < 3);
|
||||
b3FullAssert(0 <= i && i < 3);
|
||||
return m_el[i];
|
||||
}
|
||||
|
||||
@@ -144,7 +144,7 @@ public:
|
||||
* @param i Row number 0 indexed */
|
||||
SIMD_FORCE_INLINE b3Vector3& operator[](int i)
|
||||
{
|
||||
btFullAssert(0 <= i && i < 3);
|
||||
b3FullAssert(0 <= i && i < 3);
|
||||
return m_el[i];
|
||||
}
|
||||
|
||||
@@ -152,7 +152,7 @@ public:
|
||||
* @param i Row number 0 indexed */
|
||||
SIMD_FORCE_INLINE const b3Vector3& operator[](int i) const
|
||||
{
|
||||
btFullAssert(0 <= i && i < 3);
|
||||
b3FullAssert(0 <= i && i < 3);
|
||||
return m_el[i];
|
||||
}
|
||||
|
||||
@@ -204,38 +204,38 @@ public:
|
||||
void setRotation(const b3Quaternion& q)
|
||||
{
|
||||
b3Scalar d = q.length2();
|
||||
btFullAssert(d != b3Scalar(0.0));
|
||||
b3FullAssert(d != b3Scalar(0.0));
|
||||
b3Scalar s = b3Scalar(2.0) / d;
|
||||
|
||||
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 vs, Q = q.get128();
|
||||
__m128i Qi = btCastfTo128i(Q);
|
||||
__m128i Qi = b3CastfTo128i(Q);
|
||||
__m128 Y, Z;
|
||||
__m128 V1, V2, V3;
|
||||
__m128 V11, V21, V31;
|
||||
__m128 NQ = _mm_xor_ps(Q, btvMzeroMask);
|
||||
__m128i NQi = btCastfTo128i(NQ);
|
||||
__m128 NQ = _mm_xor_ps(Q, b3vMzeroMask);
|
||||
__m128i NQi = b3CastfTo128i(NQ);
|
||||
|
||||
V1 = btCastiTo128f(_mm_shuffle_epi32 (Qi, BT_SHUFFLE(1,0,2,3))); // Y X Z W
|
||||
V2 = _mm_shuffle_ps(NQ, Q, BT_SHUFFLE(0,0,1,3)); // -X -X Y W
|
||||
V3 = btCastiTo128f(_mm_shuffle_epi32 (Qi, BT_SHUFFLE(2,1,0,3))); // Z Y X W
|
||||
V1 = b3CastiTo128f(_mm_shuffle_epi32 (Qi, B3_SHUFFLE(1,0,2,3))); // Y X Z W
|
||||
V2 = _mm_shuffle_ps(NQ, Q, B3_SHUFFLE(0,0,1,3)); // -X -X Y W
|
||||
V3 = b3CastiTo128f(_mm_shuffle_epi32 (Qi, B3_SHUFFLE(2,1,0,3))); // Z Y X W
|
||||
V1 = _mm_xor_ps(V1, vMPPP); // change the sign of the first element
|
||||
|
||||
V11 = btCastiTo128f(_mm_shuffle_epi32 (Qi, BT_SHUFFLE(1,1,0,3))); // Y Y X W
|
||||
V11 = b3CastiTo128f(_mm_shuffle_epi32 (Qi, B3_SHUFFLE(1,1,0,3))); // Y Y X W
|
||||
V21 = _mm_unpackhi_ps(Q, Q); // Z Z W W
|
||||
V31 = _mm_shuffle_ps(Q, NQ, BT_SHUFFLE(0,2,0,3)); // X Z -X -W
|
||||
V31 = _mm_shuffle_ps(Q, NQ, B3_SHUFFLE(0,2,0,3)); // X Z -X -W
|
||||
|
||||
V2 = V2 * V1; //
|
||||
V1 = V1 * V11; //
|
||||
V3 = V3 * V31; //
|
||||
|
||||
V11 = _mm_shuffle_ps(NQ, Q, BT_SHUFFLE(2,3,1,3)); // -Z -W Y W
|
||||
V11 = _mm_shuffle_ps(NQ, Q, B3_SHUFFLE(2,3,1,3)); // -Z -W Y W
|
||||
V11 = V11 * V21; //
|
||||
V21 = _mm_xor_ps(V21, vMPPP); // change the sign of the first element
|
||||
V31 = _mm_shuffle_ps(Q, NQ, BT_SHUFFLE(3,3,1,3)); // W W -Y -W
|
||||
V31 = _mm_shuffle_ps(Q, NQ, B3_SHUFFLE(3,3,1,3)); // W W -Y -W
|
||||
V31 = _mm_xor_ps(V31, vMPPP); // change the sign of the first element
|
||||
Y = btCastiTo128f(_mm_shuffle_epi32 (NQi, BT_SHUFFLE(3,2,0,3))); // -W -Z -X -W
|
||||
Z = btCastiTo128f(_mm_shuffle_epi32 (Qi, BT_SHUFFLE(1,0,1,3))); // Y X Y W
|
||||
Y = b3CastiTo128f(_mm_shuffle_epi32 (NQi, B3_SHUFFLE(3,2,0,3))); // -W -Z -X -W
|
||||
Z = b3CastiTo128f(_mm_shuffle_epi32 (Qi, B3_SHUFFLE(1,0,1,3))); // Y X Y W
|
||||
|
||||
vs = _mm_load_ss(&s);
|
||||
V21 = V21 * Y;
|
||||
@@ -245,7 +245,7 @@ public:
|
||||
V2 = V2 + V21;
|
||||
V3 = V3 + V31;
|
||||
|
||||
vs = bt_splat3_ps(vs, 0);
|
||||
vs = b3_splat3_ps(vs, 0);
|
||||
// s ready
|
||||
V1 = V1 * vs;
|
||||
V2 = V2 * vs;
|
||||
@@ -292,12 +292,12 @@ public:
|
||||
**/
|
||||
void setEulerZYX(b3Scalar eulerX,b3Scalar eulerY,b3Scalar eulerZ) {
|
||||
///@todo proposed to reverse this since it's labeled zyx but takes arguments xyz and it will match all other parts of the code
|
||||
b3Scalar ci ( btCos(eulerX));
|
||||
b3Scalar cj ( btCos(eulerY));
|
||||
b3Scalar ch ( btCos(eulerZ));
|
||||
b3Scalar si ( btSin(eulerX));
|
||||
b3Scalar sj ( btSin(eulerY));
|
||||
b3Scalar sh ( btSin(eulerZ));
|
||||
b3Scalar ci ( b3Cos(eulerX));
|
||||
b3Scalar cj ( b3Cos(eulerY));
|
||||
b3Scalar ch ( b3Cos(eulerZ));
|
||||
b3Scalar si ( b3Sin(eulerX));
|
||||
b3Scalar sj ( b3Sin(eulerY));
|
||||
b3Scalar sh ( b3Sin(eulerZ));
|
||||
b3Scalar cc = ci * ch;
|
||||
b3Scalar cs = ci * sh;
|
||||
b3Scalar sc = si * ch;
|
||||
@@ -311,7 +311,7 @@ public:
|
||||
/**@brief Set the matrix to the identity */
|
||||
void setIdentity()
|
||||
{
|
||||
#if (defined(BT_USE_SSE_IN_API)&& defined (BT_USE_SSE)) || defined(BT_USE_NEON)
|
||||
#if (defined(B3_USE_SSE_IN_API)&& defined (B3_USE_SSE)) || defined(B3_USE_NEON)
|
||||
m_el[0] = v1000;
|
||||
m_el[1] = v0100;
|
||||
m_el[2] = v0010;
|
||||
@@ -324,7 +324,7 @@ public:
|
||||
|
||||
static const b3Matrix3x3& getIdentity()
|
||||
{
|
||||
#if (defined(BT_USE_SSE_IN_API)&& defined (BT_USE_SSE)) || defined(BT_USE_NEON)
|
||||
#if (defined(B3_USE_SSE_IN_API)&& defined (B3_USE_SSE)) || defined(B3_USE_NEON)
|
||||
static const b3Matrix3x3
|
||||
identityMatrix(v1000, v0100, v0010);
|
||||
#else
|
||||
@@ -341,26 +341,26 @@ public:
|
||||
* @param m The array to be filled */
|
||||
void getOpenGLSubMatrix(b3Scalar *m) const
|
||||
{
|
||||
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 v0 = m_el[0].mVec128;
|
||||
__m128 v1 = m_el[1].mVec128;
|
||||
__m128 v2 = m_el[2].mVec128; // x2 y2 z2 w2
|
||||
__m128 *vm = (__m128 *)m;
|
||||
__m128 vT;
|
||||
|
||||
v2 = _mm_and_ps(v2, btvFFF0fMask); // x2 y2 z2 0
|
||||
v2 = _mm_and_ps(v2, b3vFFF0fMask); // x2 y2 z2 0
|
||||
|
||||
vT = _mm_unpackhi_ps(v0, v1); // z0 z1 * *
|
||||
v0 = _mm_unpacklo_ps(v0, v1); // x0 x1 y0 y1
|
||||
|
||||
v1 = _mm_shuffle_ps(v0, v2, BT_SHUFFLE(2, 3, 1, 3) ); // y0 y1 y2 0
|
||||
v0 = _mm_shuffle_ps(v0, v2, BT_SHUFFLE(0, 1, 0, 3) ); // x0 x1 x2 0
|
||||
v2 = btCastdTo128f(_mm_move_sd(btCastfTo128d(v2), btCastfTo128d(vT))); // z0 z1 z2 0
|
||||
v1 = _mm_shuffle_ps(v0, v2, B3_SHUFFLE(2, 3, 1, 3) ); // y0 y1 y2 0
|
||||
v0 = _mm_shuffle_ps(v0, v2, B3_SHUFFLE(0, 1, 0, 3) ); // x0 x1 x2 0
|
||||
v2 = b3CastdTo128f(_mm_move_sd(b3CastfTo128d(v2), b3CastfTo128d(vT))); // z0 z1 z2 0
|
||||
|
||||
vm[0] = v0;
|
||||
vm[1] = v1;
|
||||
vm[2] = v2;
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
// note: zeros the w channel. We can preserve it at the cost of two more vtrn instructions.
|
||||
static const uint32x2_t zMask = (const uint32x2_t) {-1, 0 };
|
||||
float32x4_t *vm = (float32x4_t *)m;
|
||||
@@ -394,12 +394,12 @@ public:
|
||||
* @param q The quaternion which will be set */
|
||||
void getRotation(b3Quaternion& q) const
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
b3Scalar trace = m_el[0].getX() + m_el[1].getY() + m_el[2].getZ();
|
||||
b3Scalar s, x;
|
||||
|
||||
union {
|
||||
btSimdFloat4 vec;
|
||||
b3SimdFloat4 vec;
|
||||
b3Scalar f[4];
|
||||
} temp;
|
||||
|
||||
@@ -440,7 +440,7 @@ public:
|
||||
//temp.f[i] = s * b3Scalar(0.5);
|
||||
}
|
||||
|
||||
s = btSqrt(x);
|
||||
s = b3Sqrt(x);
|
||||
q.set128(temp.vec);
|
||||
s = b3Scalar(0.5) / s;
|
||||
|
||||
@@ -452,7 +452,7 @@ public:
|
||||
|
||||
if (trace > b3Scalar(0.0))
|
||||
{
|
||||
b3Scalar s = btSqrt(trace + b3Scalar(1.0));
|
||||
b3Scalar s = b3Sqrt(trace + b3Scalar(1.0));
|
||||
temp[3]=(s * b3Scalar(0.5));
|
||||
s = b3Scalar(0.5) / s;
|
||||
|
||||
@@ -468,7 +468,7 @@ public:
|
||||
int j = (i + 1) % 3;
|
||||
int k = (i + 2) % 3;
|
||||
|
||||
b3Scalar s = btSqrt(m_el[i][i] - m_el[j][j] - m_el[k][k] + b3Scalar(1.0));
|
||||
b3Scalar s = b3Sqrt(m_el[i][i] - m_el[j][j] - m_el[k][k] + b3Scalar(1.0));
|
||||
temp[i] = s * b3Scalar(0.5);
|
||||
s = b3Scalar(0.5) / s;
|
||||
|
||||
@@ -488,12 +488,12 @@ public:
|
||||
{
|
||||
|
||||
// first use the normal calculus
|
||||
yaw = b3Scalar(btAtan2(m_el[1].getX(), m_el[0].getX()));
|
||||
pitch = b3Scalar(btAsin(-m_el[2].getX()));
|
||||
roll = b3Scalar(btAtan2(m_el[2].getY(), m_el[2].getZ()));
|
||||
yaw = b3Scalar(b3Atan2(m_el[1].getX(), m_el[0].getX()));
|
||||
pitch = b3Scalar(b3Asin(-m_el[2].getX()));
|
||||
roll = b3Scalar(b3Atan2(m_el[2].getY(), m_el[2].getZ()));
|
||||
|
||||
// on pitch = +/-HalfPI
|
||||
if (btFabs(pitch)==SIMD_HALF_PI)
|
||||
if (b3Fabs(pitch)==SIMD_HALF_PI)
|
||||
{
|
||||
if (yaw>0)
|
||||
yaw-=SIMD_PI;
|
||||
@@ -527,13 +527,13 @@ public:
|
||||
//get the pointer to the raw data
|
||||
|
||||
// Check that pitch is not at a singularity
|
||||
if (btFabs(m_el[2].getX()) >= 1)
|
||||
if (b3Fabs(m_el[2].getX()) >= 1)
|
||||
{
|
||||
euler_out.yaw = 0;
|
||||
euler_out2.yaw = 0;
|
||||
|
||||
// From difference of angles formula
|
||||
b3Scalar delta = btAtan2(m_el[0].getX(),m_el[0].getZ());
|
||||
b3Scalar delta = b3Atan2(m_el[0].getX(),m_el[0].getZ());
|
||||
if (m_el[2].getX() > 0) //gimbal locked up
|
||||
{
|
||||
euler_out.pitch = SIMD_PI / b3Scalar(2.0);
|
||||
@@ -551,18 +551,18 @@ public:
|
||||
}
|
||||
else
|
||||
{
|
||||
euler_out.pitch = - btAsin(m_el[2].getX());
|
||||
euler_out.pitch = - b3Asin(m_el[2].getX());
|
||||
euler_out2.pitch = SIMD_PI - euler_out.pitch;
|
||||
|
||||
euler_out.roll = btAtan2(m_el[2].getY()/btCos(euler_out.pitch),
|
||||
m_el[2].getZ()/btCos(euler_out.pitch));
|
||||
euler_out2.roll = btAtan2(m_el[2].getY()/btCos(euler_out2.pitch),
|
||||
m_el[2].getZ()/btCos(euler_out2.pitch));
|
||||
euler_out.roll = b3Atan2(m_el[2].getY()/b3Cos(euler_out.pitch),
|
||||
m_el[2].getZ()/b3Cos(euler_out.pitch));
|
||||
euler_out2.roll = b3Atan2(m_el[2].getY()/b3Cos(euler_out2.pitch),
|
||||
m_el[2].getZ()/b3Cos(euler_out2.pitch));
|
||||
|
||||
euler_out.yaw = btAtan2(m_el[1].getX()/btCos(euler_out.pitch),
|
||||
m_el[0].getX()/btCos(euler_out.pitch));
|
||||
euler_out2.yaw = btAtan2(m_el[1].getX()/btCos(euler_out2.pitch),
|
||||
m_el[0].getX()/btCos(euler_out2.pitch));
|
||||
euler_out.yaw = b3Atan2(m_el[1].getX()/b3Cos(euler_out.pitch),
|
||||
m_el[0].getX()/b3Cos(euler_out.pitch));
|
||||
euler_out2.yaw = b3Atan2(m_el[1].getX()/b3Cos(euler_out2.pitch),
|
||||
m_el[0].getX()/b3Cos(euler_out2.pitch));
|
||||
}
|
||||
|
||||
if (solution_number == 1)
|
||||
@@ -584,7 +584,7 @@ public:
|
||||
|
||||
b3Matrix3x3 scaled(const b3Vector3& s) const
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
return b3Matrix3x3(m_el[0] * s, m_el[1] * s, m_el[2] * s);
|
||||
#else
|
||||
return b3Matrix3x3(
|
||||
@@ -640,15 +640,15 @@ public:
|
||||
int p = 0;
|
||||
int q = 1;
|
||||
int r = 2;
|
||||
b3Scalar max = btFabs(m_el[0][1]);
|
||||
b3Scalar v = btFabs(m_el[0][2]);
|
||||
b3Scalar max = b3Fabs(m_el[0][1]);
|
||||
b3Scalar v = b3Fabs(m_el[0][2]);
|
||||
if (v > max)
|
||||
{
|
||||
q = 2;
|
||||
r = 1;
|
||||
max = v;
|
||||
}
|
||||
v = btFabs(m_el[1][2]);
|
||||
v = b3Fabs(m_el[1][2]);
|
||||
if (v > max)
|
||||
{
|
||||
p = 1;
|
||||
@@ -657,7 +657,7 @@ public:
|
||||
max = v;
|
||||
}
|
||||
|
||||
b3Scalar t = threshold * (btFabs(m_el[0][0]) + btFabs(m_el[1][1]) + btFabs(m_el[2][2]));
|
||||
b3Scalar t = threshold * (b3Fabs(m_el[0][0]) + b3Fabs(m_el[1][1]) + b3Fabs(m_el[2][2]));
|
||||
if (max <= t)
|
||||
{
|
||||
if (max <= SIMD_EPSILON * t)
|
||||
@@ -675,9 +675,9 @@ public:
|
||||
b3Scalar sin;
|
||||
if (theta2 * theta2 < b3Scalar(10 / SIMD_EPSILON))
|
||||
{
|
||||
t = (theta >= 0) ? 1 / (theta + btSqrt(1 + theta2))
|
||||
: 1 / (theta - btSqrt(1 + theta2));
|
||||
cos = 1 / btSqrt(1 + t * t);
|
||||
t = (theta >= 0) ? 1 / (theta + b3Sqrt(1 + theta2))
|
||||
: 1 / (theta - b3Sqrt(1 + theta2));
|
||||
cos = 1 / b3Sqrt(1 + t * t);
|
||||
sin = cos * t;
|
||||
}
|
||||
else
|
||||
@@ -724,15 +724,15 @@ public:
|
||||
return m_el[r1][c1] * m_el[r2][c2] - m_el[r1][c2] * m_el[r2][c1];
|
||||
}
|
||||
|
||||
void serialize(struct btMatrix3x3Data& dataOut) const;
|
||||
void serialize(struct b3Matrix3x3Data& dataOut) const;
|
||||
|
||||
void serializeFloat(struct btMatrix3x3FloatData& dataOut) const;
|
||||
void serializeFloat(struct b3Matrix3x3FloatData& dataOut) const;
|
||||
|
||||
void deSerialize(const struct btMatrix3x3Data& dataIn);
|
||||
void deSerialize(const struct b3Matrix3x3Data& dataIn);
|
||||
|
||||
void deSerializeFloat(const struct btMatrix3x3FloatData& dataIn);
|
||||
void deSerializeFloat(const struct b3Matrix3x3FloatData& dataIn);
|
||||
|
||||
void deSerializeDouble(const struct btMatrix3x3DoubleData& dataIn);
|
||||
void deSerializeDouble(const struct b3Matrix3x3DoubleData& dataIn);
|
||||
|
||||
};
|
||||
|
||||
@@ -740,7 +740,7 @@ public:
|
||||
SIMD_FORCE_INLINE b3Matrix3x3&
|
||||
b3Matrix3x3::operator*=(const b3Matrix3x3& m)
|
||||
{
|
||||
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 rv00, rv01, rv02;
|
||||
__m128 rv10, rv11, rv12;
|
||||
__m128 rv20, rv21, rv22;
|
||||
@@ -750,32 +750,32 @@ b3Matrix3x3::operator*=(const b3Matrix3x3& m)
|
||||
rv12 = m_el[1].mVec128;
|
||||
rv22 = m_el[2].mVec128;
|
||||
|
||||
mv0 = _mm_and_ps(m[0].mVec128, btvFFF0fMask);
|
||||
mv1 = _mm_and_ps(m[1].mVec128, btvFFF0fMask);
|
||||
mv2 = _mm_and_ps(m[2].mVec128, btvFFF0fMask);
|
||||
mv0 = _mm_and_ps(m[0].mVec128, b3vFFF0fMask);
|
||||
mv1 = _mm_and_ps(m[1].mVec128, b3vFFF0fMask);
|
||||
mv2 = _mm_and_ps(m[2].mVec128, b3vFFF0fMask);
|
||||
|
||||
// rv0
|
||||
rv00 = bt_splat_ps(rv02, 0);
|
||||
rv01 = bt_splat_ps(rv02, 1);
|
||||
rv02 = bt_splat_ps(rv02, 2);
|
||||
rv00 = b3_splat_ps(rv02, 0);
|
||||
rv01 = b3_splat_ps(rv02, 1);
|
||||
rv02 = b3_splat_ps(rv02, 2);
|
||||
|
||||
rv00 = _mm_mul_ps(rv00, mv0);
|
||||
rv01 = _mm_mul_ps(rv01, mv1);
|
||||
rv02 = _mm_mul_ps(rv02, mv2);
|
||||
|
||||
// rv1
|
||||
rv10 = bt_splat_ps(rv12, 0);
|
||||
rv11 = bt_splat_ps(rv12, 1);
|
||||
rv12 = bt_splat_ps(rv12, 2);
|
||||
rv10 = b3_splat_ps(rv12, 0);
|
||||
rv11 = b3_splat_ps(rv12, 1);
|
||||
rv12 = b3_splat_ps(rv12, 2);
|
||||
|
||||
rv10 = _mm_mul_ps(rv10, mv0);
|
||||
rv11 = _mm_mul_ps(rv11, mv1);
|
||||
rv12 = _mm_mul_ps(rv12, mv2);
|
||||
|
||||
// rv2
|
||||
rv20 = bt_splat_ps(rv22, 0);
|
||||
rv21 = bt_splat_ps(rv22, 1);
|
||||
rv22 = bt_splat_ps(rv22, 2);
|
||||
rv20 = b3_splat_ps(rv22, 0);
|
||||
rv21 = b3_splat_ps(rv22, 1);
|
||||
rv22 = b3_splat_ps(rv22, 2);
|
||||
|
||||
rv20 = _mm_mul_ps(rv20, mv0);
|
||||
rv21 = _mm_mul_ps(rv21, mv1);
|
||||
@@ -789,7 +789,7 @@ b3Matrix3x3::operator*=(const b3Matrix3x3& m)
|
||||
m_el[1].mVec128 = _mm_add_ps(rv10, rv12);
|
||||
m_el[2].mVec128 = _mm_add_ps(rv20, rv22);
|
||||
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
|
||||
float32x4_t rv0, rv1, rv2;
|
||||
float32x4_t v0, v1, v2;
|
||||
@@ -799,9 +799,9 @@ b3Matrix3x3::operator*=(const b3Matrix3x3& m)
|
||||
v1 = m_el[1].mVec128;
|
||||
v2 = m_el[2].mVec128;
|
||||
|
||||
mv0 = (float32x4_t) vandq_s32((int32x4_t)m[0].mVec128, btvFFF0Mask);
|
||||
mv1 = (float32x4_t) vandq_s32((int32x4_t)m[1].mVec128, btvFFF0Mask);
|
||||
mv2 = (float32x4_t) vandq_s32((int32x4_t)m[2].mVec128, btvFFF0Mask);
|
||||
mv0 = (float32x4_t) vandq_s32((int32x4_t)m[0].mVec128, b3vFFF0Mask);
|
||||
mv1 = (float32x4_t) vandq_s32((int32x4_t)m[1].mVec128, b3vFFF0Mask);
|
||||
mv2 = (float32x4_t) vandq_s32((int32x4_t)m[2].mVec128, b3vFFF0Mask);
|
||||
|
||||
rv0 = vmulq_lane_f32(mv0, vget_low_f32(v0), 0);
|
||||
rv1 = vmulq_lane_f32(mv0, vget_low_f32(v1), 0);
|
||||
@@ -830,7 +830,7 @@ b3Matrix3x3::operator*=(const b3Matrix3x3& m)
|
||||
SIMD_FORCE_INLINE b3Matrix3x3&
|
||||
b3Matrix3x3::operator+=(const b3Matrix3x3& m)
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
m_el[0].mVec128 = m_el[0].mVec128 + m.m_el[0].mVec128;
|
||||
m_el[1].mVec128 = m_el[1].mVec128 + m.m_el[1].mVec128;
|
||||
m_el[2].mVec128 = m_el[2].mVec128 + m.m_el[2].mVec128;
|
||||
@@ -852,13 +852,13 @@ b3Matrix3x3::operator+=(const b3Matrix3x3& m)
|
||||
SIMD_FORCE_INLINE b3Matrix3x3
|
||||
operator*(const b3Matrix3x3& m, const b3Scalar & k)
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
|
||||
__m128 vk = bt_splat_ps(_mm_load_ss((float *)&k), 0x80);
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))
|
||||
__m128 vk = b3_splat_ps(_mm_load_ss((float *)&k), 0x80);
|
||||
return b3Matrix3x3(
|
||||
_mm_mul_ps(m[0].mVec128, vk),
|
||||
_mm_mul_ps(m[1].mVec128, vk),
|
||||
_mm_mul_ps(m[2].mVec128, vk));
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
return b3Matrix3x3(
|
||||
vmulq_n_f32(m[0].mVec128, k),
|
||||
vmulq_n_f32(m[1].mVec128, k),
|
||||
@@ -874,7 +874,7 @@ operator*(const b3Matrix3x3& m, const b3Scalar & k)
|
||||
SIMD_FORCE_INLINE b3Matrix3x3
|
||||
operator+(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
return b3Matrix3x3(
|
||||
m1[0].mVec128 + m2[0].mVec128,
|
||||
m1[1].mVec128 + m2[1].mVec128,
|
||||
@@ -898,7 +898,7 @@ operator+(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
|
||||
SIMD_FORCE_INLINE b3Matrix3x3
|
||||
operator-(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
return b3Matrix3x3(
|
||||
m1[0].mVec128 - m2[0].mVec128,
|
||||
m1[1].mVec128 - m2[1].mVec128,
|
||||
@@ -923,7 +923,7 @@ operator-(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
|
||||
SIMD_FORCE_INLINE b3Matrix3x3&
|
||||
b3Matrix3x3::operator-=(const b3Matrix3x3& m)
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
m_el[0].mVec128 = m_el[0].mVec128 - m.m_el[0].mVec128;
|
||||
m_el[1].mVec128 = m_el[1].mVec128 - m.m_el[1].mVec128;
|
||||
m_el[2].mVec128 = m_el[2].mVec128 - m.m_el[2].mVec128;
|
||||
@@ -946,52 +946,52 @@ b3Matrix3x3::operator-=(const b3Matrix3x3& m)
|
||||
SIMD_FORCE_INLINE b3Scalar
|
||||
b3Matrix3x3::determinant() const
|
||||
{
|
||||
return btTriple((*this)[0], (*this)[1], (*this)[2]);
|
||||
return b3Triple((*this)[0], (*this)[1], (*this)[2]);
|
||||
}
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE b3Matrix3x3
|
||||
b3Matrix3x3::absolute() const
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))
|
||||
return b3Matrix3x3(
|
||||
_mm_and_ps(m_el[0].mVec128, btvAbsfMask),
|
||||
_mm_and_ps(m_el[1].mVec128, btvAbsfMask),
|
||||
_mm_and_ps(m_el[2].mVec128, btvAbsfMask));
|
||||
#elif defined(BT_USE_NEON)
|
||||
_mm_and_ps(m_el[0].mVec128, b3vAbsfMask),
|
||||
_mm_and_ps(m_el[1].mVec128, b3vAbsfMask),
|
||||
_mm_and_ps(m_el[2].mVec128, b3vAbsfMask));
|
||||
#elif defined(B3_USE_NEON)
|
||||
return b3Matrix3x3(
|
||||
(float32x4_t)vandq_s32((int32x4_t)m_el[0].mVec128, btv3AbsMask),
|
||||
(float32x4_t)vandq_s32((int32x4_t)m_el[1].mVec128, btv3AbsMask),
|
||||
(float32x4_t)vandq_s32((int32x4_t)m_el[2].mVec128, btv3AbsMask));
|
||||
(float32x4_t)vandq_s32((int32x4_t)m_el[0].mVec128, b3v3AbsMask),
|
||||
(float32x4_t)vandq_s32((int32x4_t)m_el[1].mVec128, b3v3AbsMask),
|
||||
(float32x4_t)vandq_s32((int32x4_t)m_el[2].mVec128, b3v3AbsMask));
|
||||
#else
|
||||
return b3Matrix3x3(
|
||||
btFabs(m_el[0].getX()), btFabs(m_el[0].getY()), btFabs(m_el[0].getZ()),
|
||||
btFabs(m_el[1].getX()), btFabs(m_el[1].getY()), btFabs(m_el[1].getZ()),
|
||||
btFabs(m_el[2].getX()), btFabs(m_el[2].getY()), btFabs(m_el[2].getZ()));
|
||||
b3Fabs(m_el[0].getX()), b3Fabs(m_el[0].getY()), b3Fabs(m_el[0].getZ()),
|
||||
b3Fabs(m_el[1].getX()), b3Fabs(m_el[1].getY()), b3Fabs(m_el[1].getZ()),
|
||||
b3Fabs(m_el[2].getX()), b3Fabs(m_el[2].getY()), b3Fabs(m_el[2].getZ()));
|
||||
#endif
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE b3Matrix3x3
|
||||
b3Matrix3x3::transpose() const
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))
|
||||
__m128 v0 = m_el[0].mVec128;
|
||||
__m128 v1 = m_el[1].mVec128;
|
||||
__m128 v2 = m_el[2].mVec128; // x2 y2 z2 w2
|
||||
__m128 vT;
|
||||
|
||||
v2 = _mm_and_ps(v2, btvFFF0fMask); // x2 y2 z2 0
|
||||
v2 = _mm_and_ps(v2, b3vFFF0fMask); // x2 y2 z2 0
|
||||
|
||||
vT = _mm_unpackhi_ps(v0, v1); // z0 z1 * *
|
||||
v0 = _mm_unpacklo_ps(v0, v1); // x0 x1 y0 y1
|
||||
|
||||
v1 = _mm_shuffle_ps(v0, v2, BT_SHUFFLE(2, 3, 1, 3) ); // y0 y1 y2 0
|
||||
v0 = _mm_shuffle_ps(v0, v2, BT_SHUFFLE(0, 1, 0, 3) ); // x0 x1 x2 0
|
||||
v2 = btCastdTo128f(_mm_move_sd(btCastfTo128d(v2), btCastfTo128d(vT))); // z0 z1 z2 0
|
||||
v1 = _mm_shuffle_ps(v0, v2, B3_SHUFFLE(2, 3, 1, 3) ); // y0 y1 y2 0
|
||||
v0 = _mm_shuffle_ps(v0, v2, B3_SHUFFLE(0, 1, 0, 3) ); // x0 x1 x2 0
|
||||
v2 = b3CastdTo128f(_mm_move_sd(b3CastfTo128d(v2), b3CastfTo128d(vT))); // z0 z1 z2 0
|
||||
|
||||
|
||||
return b3Matrix3x3( v0, v1, v2 );
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
// note: zeros the w channel. We can preserve it at the cost of two more vtrn instructions.
|
||||
static const uint32x2_t zMask = (const uint32x2_t) {-1, 0 };
|
||||
float32x4x2_t top = vtrnq_f32( m_el[0].mVec128, m_el[1].mVec128 ); // {x0 x1 z0 z1}, {y0 y1 w0 w1}
|
||||
@@ -1021,7 +1021,7 @@ b3Matrix3x3::inverse() const
|
||||
{
|
||||
b3Vector3 co(cofac(1, 1, 2, 2), cofac(1, 2, 2, 0), cofac(1, 0, 2, 1));
|
||||
b3Scalar det = (*this)[0].dot(co);
|
||||
btFullAssert(det != b3Scalar(0.0));
|
||||
b3FullAssert(det != b3Scalar(0.0));
|
||||
b3Scalar s = b3Scalar(1.0) / det;
|
||||
return b3Matrix3x3(co.getX() * s, cofac(0, 2, 2, 1) * s, cofac(0, 1, 1, 2) * s,
|
||||
co.getY() * s, cofac(0, 0, 2, 2) * s, cofac(0, 2, 1, 0) * s,
|
||||
@@ -1031,13 +1031,13 @@ b3Matrix3x3::inverse() const
|
||||
SIMD_FORCE_INLINE b3Matrix3x3
|
||||
b3Matrix3x3::transposeTimes(const b3Matrix3x3& m) const
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))
|
||||
// zeros w
|
||||
// static const __m128i xyzMask = (const __m128i){ -1ULL, 0xffffffffULL };
|
||||
__m128 row = m_el[0].mVec128;
|
||||
__m128 m0 = _mm_and_ps( m.getRow(0).mVec128, btvFFF0fMask );
|
||||
__m128 m1 = _mm_and_ps( m.getRow(1).mVec128, btvFFF0fMask);
|
||||
__m128 m2 = _mm_and_ps( m.getRow(2).mVec128, btvFFF0fMask );
|
||||
__m128 m0 = _mm_and_ps( m.getRow(0).mVec128, b3vFFF0fMask );
|
||||
__m128 m1 = _mm_and_ps( m.getRow(1).mVec128, b3vFFF0fMask);
|
||||
__m128 m2 = _mm_and_ps( m.getRow(2).mVec128, b3vFFF0fMask );
|
||||
__m128 r0 = _mm_mul_ps(m0, _mm_shuffle_ps(row, row, 0));
|
||||
__m128 r1 = _mm_mul_ps(m0, _mm_shuffle_ps(row, row, 0x55));
|
||||
__m128 r2 = _mm_mul_ps(m0, _mm_shuffle_ps(row, row, 0xaa));
|
||||
@@ -1051,7 +1051,7 @@ b3Matrix3x3::transposeTimes(const b3Matrix3x3& m) const
|
||||
r2 = _mm_add_ps( r2, _mm_mul_ps(m2, _mm_shuffle_ps(row, row, 0xaa)));
|
||||
return b3Matrix3x3( r0, r1, r2 );
|
||||
|
||||
#elif defined BT_USE_NEON
|
||||
#elif defined B3_USE_NEON
|
||||
// zeros w
|
||||
static const uint32x4_t xyzMask = (const uint32x4_t){ -1, -1, -1, 0 };
|
||||
float32x4_t m0 = (float32x4_t) vandq_u32( (uint32x4_t) m.getRow(0).mVec128, xyzMask );
|
||||
@@ -1087,7 +1087,7 @@ b3Matrix3x3::transposeTimes(const b3Matrix3x3& m) const
|
||||
SIMD_FORCE_INLINE b3Matrix3x3
|
||||
b3Matrix3x3::timesTranspose(const b3Matrix3x3& m) const
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))
|
||||
__m128 a0 = m_el[0].mVec128;
|
||||
__m128 a1 = m_el[1].mVec128;
|
||||
__m128 a2 = m_el[2].mVec128;
|
||||
@@ -1108,7 +1108,7 @@ b3Matrix3x3::timesTranspose(const b3Matrix3x3& m) const
|
||||
r2 = _mm_add_ps(r2, _mm_mul_ps(mz, _mm_shuffle_ps(a2, a2, 0xaa)));
|
||||
return b3Matrix3x3( r0, r1, r2);
|
||||
|
||||
#elif defined BT_USE_NEON
|
||||
#elif defined B3_USE_NEON
|
||||
float32x4_t a0 = m_el[0].mVec128;
|
||||
float32x4_t a1 = m_el[1].mVec128;
|
||||
float32x4_t a2 = m_el[2].mVec128;
|
||||
@@ -1140,7 +1140,7 @@ b3Matrix3x3::timesTranspose(const b3Matrix3x3& m) const
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator*(const b3Matrix3x3& m, const b3Vector3& v)
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))|| defined (B3_USE_NEON)
|
||||
return v.dot3(m[0], m[1], m[2]);
|
||||
#else
|
||||
return b3Vector3(m[0].dot(v), m[1].dot(v), m[2].dot(v));
|
||||
@@ -1151,30 +1151,30 @@ operator*(const b3Matrix3x3& m, const b3Vector3& v)
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator*(const b3Vector3& v, const b3Matrix3x3& m)
|
||||
{
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))
|
||||
|
||||
const __m128 vv = v.mVec128;
|
||||
|
||||
__m128 c0 = bt_splat_ps( vv, 0);
|
||||
__m128 c1 = bt_splat_ps( vv, 1);
|
||||
__m128 c2 = bt_splat_ps( vv, 2);
|
||||
__m128 c0 = b3_splat_ps( vv, 0);
|
||||
__m128 c1 = b3_splat_ps( vv, 1);
|
||||
__m128 c2 = b3_splat_ps( vv, 2);
|
||||
|
||||
c0 = _mm_mul_ps(c0, _mm_and_ps(m[0].mVec128, btvFFF0fMask) );
|
||||
c1 = _mm_mul_ps(c1, _mm_and_ps(m[1].mVec128, btvFFF0fMask) );
|
||||
c0 = _mm_mul_ps(c0, _mm_and_ps(m[0].mVec128, b3vFFF0fMask) );
|
||||
c1 = _mm_mul_ps(c1, _mm_and_ps(m[1].mVec128, b3vFFF0fMask) );
|
||||
c0 = _mm_add_ps(c0, c1);
|
||||
c2 = _mm_mul_ps(c2, _mm_and_ps(m[2].mVec128, btvFFF0fMask) );
|
||||
c2 = _mm_mul_ps(c2, _mm_and_ps(m[2].mVec128, b3vFFF0fMask) );
|
||||
|
||||
return b3Vector3(_mm_add_ps(c0, c2));
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
const float32x4_t vv = v.mVec128;
|
||||
const float32x2_t vlo = vget_low_f32(vv);
|
||||
const float32x2_t vhi = vget_high_f32(vv);

float32x4_t c0, c1, c2;

c0 = (float32x4_t) vandq_s32((int32x4_t)m[0].mVec128, btvFFF0Mask);
c1 = (float32x4_t) vandq_s32((int32x4_t)m[1].mVec128, btvFFF0Mask);
c2 = (float32x4_t) vandq_s32((int32x4_t)m[2].mVec128, btvFFF0Mask);
c0 = (float32x4_t) vandq_s32((int32x4_t)m[0].mVec128, b3vFFF0Mask);
c1 = (float32x4_t) vandq_s32((int32x4_t)m[1].mVec128, b3vFFF0Mask);
c2 = (float32x4_t) vandq_s32((int32x4_t)m[2].mVec128, b3vFFF0Mask);

c0 = vmulq_lane_f32(c0, vlo, 0);
c1 = vmulq_lane_f32(c1, vlo, 1);
@@ -1191,41 +1191,41 @@ operator*(const b3Vector3& v, const b3Matrix3x3& m)
SIMD_FORCE_INLINE b3Matrix3x3
operator*(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
{
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))

__m128 m10 = m1[0].mVec128;
__m128 m11 = m1[1].mVec128;
__m128 m12 = m1[2].mVec128;

__m128 m2v = _mm_and_ps(m2[0].mVec128, btvFFF0fMask);
__m128 m2v = _mm_and_ps(m2[0].mVec128, b3vFFF0fMask);

__m128 c0 = bt_splat_ps( m10, 0);
__m128 c1 = bt_splat_ps( m11, 0);
__m128 c2 = bt_splat_ps( m12, 0);
__m128 c0 = b3_splat_ps( m10, 0);
__m128 c1 = b3_splat_ps( m11, 0);
__m128 c2 = b3_splat_ps( m12, 0);

c0 = _mm_mul_ps(c0, m2v);
c1 = _mm_mul_ps(c1, m2v);
c2 = _mm_mul_ps(c2, m2v);

m2v = _mm_and_ps(m2[1].mVec128, btvFFF0fMask);
m2v = _mm_and_ps(m2[1].mVec128, b3vFFF0fMask);

__m128 c0_1 = bt_splat_ps( m10, 1);
__m128 c1_1 = bt_splat_ps( m11, 1);
__m128 c2_1 = bt_splat_ps( m12, 1);
__m128 c0_1 = b3_splat_ps( m10, 1);
__m128 c1_1 = b3_splat_ps( m11, 1);
__m128 c2_1 = b3_splat_ps( m12, 1);

c0_1 = _mm_mul_ps(c0_1, m2v);
c1_1 = _mm_mul_ps(c1_1, m2v);
c2_1 = _mm_mul_ps(c2_1, m2v);

m2v = _mm_and_ps(m2[2].mVec128, btvFFF0fMask);
m2v = _mm_and_ps(m2[2].mVec128, b3vFFF0fMask);

c0 = _mm_add_ps(c0, c0_1);
c1 = _mm_add_ps(c1, c1_1);
c2 = _mm_add_ps(c2, c2_1);

m10 = bt_splat_ps( m10, 2);
m11 = bt_splat_ps( m11, 2);
m12 = bt_splat_ps( m12, 2);
m10 = b3_splat_ps( m10, 2);
m11 = b3_splat_ps( m11, 2);
m12 = b3_splat_ps( m12, 2);

m10 = _mm_mul_ps(m10, m2v);
m11 = _mm_mul_ps(m11, m2v);
@@ -1237,7 +1237,7 @@ operator*(const b3Matrix3x3& m1, const b3Matrix3x3& m2)

return b3Matrix3x3(c0, c1, c2);

#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)

float32x4_t rv0, rv1, rv2;
float32x4_t v0, v1, v2;
@@ -1247,9 +1247,9 @@ operator*(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
v1 = m1[1].mVec128;
v2 = m1[2].mVec128;

mv0 = (float32x4_t) vandq_s32((int32x4_t)m2[0].mVec128, btvFFF0Mask);
mv1 = (float32x4_t) vandq_s32((int32x4_t)m2[1].mVec128, btvFFF0Mask);
mv2 = (float32x4_t) vandq_s32((int32x4_t)m2[2].mVec128, btvFFF0Mask);
mv0 = (float32x4_t) vandq_s32((int32x4_t)m2[0].mVec128, b3vFFF0Mask);
mv1 = (float32x4_t) vandq_s32((int32x4_t)m2[1].mVec128, b3vFFF0Mask);
mv2 = (float32x4_t) vandq_s32((int32x4_t)m2[2].mVec128, b3vFFF0Mask);

rv0 = vmulq_lane_f32(mv0, vget_low_f32(v0), 0);
rv1 = vmulq_lane_f32(mv0, vget_low_f32(v1), 0);
@@ -1274,7 +1274,7 @@ operator*(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
}

/*
SIMD_FORCE_INLINE b3Matrix3x3 btMultTransposeLeft(const b3Matrix3x3& m1, const b3Matrix3x3& m2) {
SIMD_FORCE_INLINE b3Matrix3x3 b3MultTransposeLeft(const b3Matrix3x3& m1, const b3Matrix3x3& m2) {
return b3Matrix3x3(
m1[0][0] * m2[0][0] + m1[1][0] * m2[1][0] + m1[2][0] * m2[2][0],
m1[0][0] * m2[0][1] + m1[1][0] * m2[1][1] + m1[2][0] * m2[2][1],
@@ -1292,7 +1292,7 @@ m1[0][2] * m2[0][2] + m1[1][2] * m2[1][2] + m1[2][2] * m2[2][2]);
* It will test all elements are equal. */
SIMD_FORCE_INLINE bool operator==(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
{
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE))

__m128 c0, c1, c2;

@@ -1313,50 +1313,50 @@ SIMD_FORCE_INLINE bool operator==(const b3Matrix3x3& m1, const b3Matrix3x3& m2)
}

///for serialization
struct btMatrix3x3FloatData
struct b3Matrix3x3FloatData
{
btVector3FloatData m_el[3];
b3Vector3FloatData m_el[3];
};

///for serialization
struct btMatrix3x3DoubleData
struct b3Matrix3x3DoubleData
{
btVector3DoubleData m_el[3];
b3Vector3DoubleData m_el[3];
};


SIMD_FORCE_INLINE void b3Matrix3x3::serialize(struct btMatrix3x3Data& dataOut) const
SIMD_FORCE_INLINE void b3Matrix3x3::serialize(struct b3Matrix3x3Data& dataOut) const
{
for (int i=0;i<3;i++)
m_el[i].serialize(dataOut.m_el[i]);
}

SIMD_FORCE_INLINE void b3Matrix3x3::serializeFloat(struct btMatrix3x3FloatData& dataOut) const
SIMD_FORCE_INLINE void b3Matrix3x3::serializeFloat(struct b3Matrix3x3FloatData& dataOut) const
{
for (int i=0;i<3;i++)
m_el[i].serializeFloat(dataOut.m_el[i]);
}


SIMD_FORCE_INLINE void b3Matrix3x3::deSerialize(const struct btMatrix3x3Data& dataIn)
SIMD_FORCE_INLINE void b3Matrix3x3::deSerialize(const struct b3Matrix3x3Data& dataIn)
{
for (int i=0;i<3;i++)
m_el[i].deSerialize(dataIn.m_el[i]);
}

SIMD_FORCE_INLINE void b3Matrix3x3::deSerializeFloat(const struct btMatrix3x3FloatData& dataIn)
SIMD_FORCE_INLINE void b3Matrix3x3::deSerializeFloat(const struct b3Matrix3x3FloatData& dataIn)
{
for (int i=0;i<3;i++)
m_el[i].deSerializeFloat(dataIn.m_el[i]);
}

SIMD_FORCE_INLINE void b3Matrix3x3::deSerializeDouble(const struct btMatrix3x3DoubleData& dataIn)
SIMD_FORCE_INLINE void b3Matrix3x3::deSerializeDouble(const struct b3Matrix3x3DoubleData& dataIn)
{
for (int i=0;i<3;i++)
m_el[i].deSerializeDouble(dataIn.m_el[i]);
}

#endif //BT_MATRIX3x3_H
#endif //B3_MATRIX3x3_H


@@ -14,31 +14,31 @@ subject to the following restrictions:


#ifndef BT_GEN_MINMAX_H
#define BT_GEN_MINMAX_H
#ifndef B3_GEN_MINMAX_H
#define B3_GEN_MINMAX_H

#include "b3Scalar.h"

template <class T>
SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b)
SIMD_FORCE_INLINE const T& b3Min(const T& a, const T& b)
{
return a < b ? a : b ;
}

template <class T>
SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b)
SIMD_FORCE_INLINE const T& b3Max(const T& a, const T& b)
{
return a > b ? a : b;
}

template <class T>
SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub)
SIMD_FORCE_INLINE const T& b3Clamped(const T& a, const T& lb, const T& ub)
{
return a < lb ? lb : (ub < a ? ub : a);
}

template <class T>
SIMD_FORCE_INLINE void btSetMin(T& a, const T& b)
SIMD_FORCE_INLINE void b3SetMin(T& a, const T& b)
{
if (b < a)
{
@@ -47,7 +47,7 @@ SIMD_FORCE_INLINE void btSetMin(T& a, const T& b)
}

template <class T>
SIMD_FORCE_INLINE void btSetMax(T& a, const T& b)
SIMD_FORCE_INLINE void b3SetMax(T& a, const T& b)
{
if (a < b)
{
@@ -56,7 +56,7 @@ SIMD_FORCE_INLINE void btSetMax(T& a, const T& b)
}

template <class T>
SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub)
SIMD_FORCE_INLINE void b3Clamp(T& a, const T& lb, const T& ub)
{
if (a < lb)
{
@@ -68,4 +68,4 @@ SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub)
}
}

#endif //BT_GEN_MINMAX_H
#endif //B3_GEN_MINMAX_H

@@ -34,7 +34,7 @@ public:
:m_elemSize(elemSize),
m_maxElements(maxElements)
{
m_pool = (unsigned char*) btAlignedAlloc( static_cast<unsigned int>(m_elemSize*m_maxElements),16);
m_pool = (unsigned char*) b3AlignedAlloc( static_cast<unsigned int>(m_elemSize*m_maxElements),16);

unsigned char* p = m_pool;
m_firstFree = p;
@@ -49,7 +49,7 @@ public:

~b3PoolAllocator()
{
btAlignedFree( m_pool);
b3AlignedFree( m_pool);
}

int getFreeCount() const
@@ -71,8 +71,8 @@ public:
{
// release mode fix
(void)size;
btAssert(!size || size<=m_elemSize);
btAssert(m_freeCount>0);
b3Assert(!size || size<=m_elemSize);
b3Assert(m_freeCount>0);
void* result = m_firstFree;
m_firstFree = *(void**)m_firstFree;
--m_freeCount;
@@ -93,7 +93,7 @@ public:
void freeMemory(void* ptr)
{
if (ptr) {
btAssert((unsigned char*)ptr >= m_pool && (unsigned char*)ptr < m_pool + m_maxElements * m_elemSize);
b3Assert((unsigned char*)ptr >= m_pool && (unsigned char*)ptr < m_pool + m_maxElements * m_elemSize);

*(void**)ptr = m_firstFree;
m_firstFree = ptr;

@@ -13,8 +13,8 @@ subject to the following restrictions:
*/


#ifndef BT_SIMD_QUADWORD_H
#define BT_SIMD_QUADWORD_H
#ifndef B3_SIMD_QUADWORD_H
#define B3_SIMD_QUADWORD_H

#include "b3Scalar.h"
#include "b3MinMax.h"
@@ -51,33 +51,33 @@ public:

#else //__CELLOS_LV2__ __SPU__

#if defined(BT_USE_SSE) || defined(BT_USE_NEON)
#if defined(B3_USE_SSE) || defined(B3_USE_NEON)
union {
btSimdFloat4 mVec128;
b3SimdFloat4 mVec128;
b3Scalar m_floats[4];
struct {b3Scalar x,y,z,w;};
};
public:
SIMD_FORCE_INLINE btSimdFloat4 get128() const
SIMD_FORCE_INLINE b3SimdFloat4 get128() const
{
return mVec128;
}
SIMD_FORCE_INLINE void set128(btSimdFloat4 v128)
SIMD_FORCE_INLINE void set128(b3SimdFloat4 v128)
{
mVec128 = v128;
}
#else
b3Scalar m_floats[4];
#endif // BT_USE_SSE
#endif // B3_USE_SSE

#endif //__CELLOS_LV2__ __SPU__

public:

#if defined(BT_USE_SSE) || defined(BT_USE_NEON)
#if defined(B3_USE_SSE) || defined(B3_USE_NEON)

// Set Vector
SIMD_FORCE_INLINE b3QuadWord(const btSimdFloat4 vec)
SIMD_FORCE_INLINE b3QuadWord(const b3SimdFloat4 vec)
{
mVec128 = vec;
}
@@ -124,7 +124,7 @@ public:

SIMD_FORCE_INLINE bool operator==(const b3QuadWord& other) const
{
#ifdef BT_USE_SSE
#ifdef B3_USE_SSE
return (0xf == _mm_movemask_ps((__m128)_mm_cmpeq_ps(mVec128, other.mVec128)));
#else
return ((m_floats[3]==other.m_floats[3]) &&
@@ -204,15 +204,15 @@ public:
*/
SIMD_FORCE_INLINE void setMax(const b3QuadWord& other)
{
#ifdef BT_USE_SSE
#ifdef B3_USE_SSE
mVec128 = _mm_max_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
mVec128 = vmaxq_f32(mVec128, other.mVec128);
#else
btSetMax(m_floats[0], other.m_floats[0]);
btSetMax(m_floats[1], other.m_floats[1]);
btSetMax(m_floats[2], other.m_floats[2]);
btSetMax(m_floats[3], other.m_floats[3]);
b3SetMax(m_floats[0], other.m_floats[0]);
b3SetMax(m_floats[1], other.m_floats[1]);
b3SetMax(m_floats[2], other.m_floats[2]);
b3SetMax(m_floats[3], other.m_floats[3]);
#endif
}
/**@brief Set each element to the min of the current values and the values of another b3QuadWord
@@ -220,15 +220,15 @@ public:
*/
SIMD_FORCE_INLINE void setMin(const b3QuadWord& other)
{
#ifdef BT_USE_SSE
#ifdef B3_USE_SSE
mVec128 = _mm_min_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
mVec128 = vminq_f32(mVec128, other.mVec128);
#else
btSetMin(m_floats[0], other.m_floats[0]);
btSetMin(m_floats[1], other.m_floats[1]);
btSetMin(m_floats[2], other.m_floats[2]);
btSetMin(m_floats[3], other.m_floats[3]);
b3SetMin(m_floats[0], other.m_floats[0]);
b3SetMin(m_floats[1], other.m_floats[1]);
b3SetMin(m_floats[2], other.m_floats[2]);
b3SetMin(m_floats[3], other.m_floats[3]);
#endif
}

@@ -236,4 +236,4 @@ public:

};

#endif //BT_SIMD_QUADWORD_H
#endif //B3_SIMD_QUADWORD_H

@@ -14,8 +14,8 @@ subject to the following restrictions:


#ifndef BT_SIMD__QUATERNION_H_
#define BT_SIMD__QUATERNION_H_
#ifndef B3_SIMD__QUATERNION_H_
#define B3_SIMD__QUATERNION_H_


#include "b3Vector3.h"
@@ -25,16 +25,16 @@ subject to the following restrictions:


#ifdef BT_USE_SSE
#ifdef B3_USE_SSE

const __m128 ATTRIBUTE_ALIGNED16(vOnes) = {1.0f, 1.0f, 1.0f, 1.0f};

#endif

#if defined(BT_USE_SSE) || defined(BT_USE_NEON)
#if defined(B3_USE_SSE) || defined(B3_USE_NEON)

const btSimdFloat4 ATTRIBUTE_ALIGNED16(vQInv) = {-0.0f, -0.0f, -0.0f, +0.0f};
const btSimdFloat4 ATTRIBUTE_ALIGNED16(vPPPM) = {+0.0f, +0.0f, +0.0f, -0.0f};
const b3SimdFloat4 ATTRIBUTE_ALIGNED16(vQInv) = {-0.0f, -0.0f, -0.0f, +0.0f};
const b3SimdFloat4 ATTRIBUTE_ALIGNED16(vPPPM) = {+0.0f, +0.0f, +0.0f, -0.0f};

#endif

@@ -44,9 +44,9 @@ public:
/**@brief No initialization constructor */
b3Quaternion() {}

#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))|| defined(BT_USE_NEON)
#if (defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE))|| defined(B3_USE_NEON)
// Set Vector
SIMD_FORCE_INLINE b3Quaternion(const btSimdFloat4 vec)
SIMD_FORCE_INLINE b3Quaternion(const b3SimdFloat4 vec)
{
mVec128 = vec;
}
@@ -74,7 +74,7 @@ public:
b3Quaternion(const b3Scalar& _x, const b3Scalar& _y, const b3Scalar& _z, const b3Scalar& _w)
: b3QuadWord(_x, _y, _z, _w)
{
btAssert(!((_x==1.f) && (_y==0.f) && (_z==0.f) && (_w==0.f)));
b3Assert(!((_x==1.f) && (_y==0.f) && (_z==0.f) && (_w==0.f)));
}
/**@brief Axis angle Constructor
* @param axis The axis which the rotation is around
@@ -84,12 +84,12 @@ public:
setRotation(_axis, _angle);
}
/**@brief Constructor from Euler angles
* @param yaw Angle around Y unless BT_EULER_DEFAULT_ZYX defined then Z
* @param pitch Angle around X unless BT_EULER_DEFAULT_ZYX defined then Y
* @param roll Angle around Z unless BT_EULER_DEFAULT_ZYX defined then X */
* @param yaw Angle around Y unless B3_EULER_DEFAULT_ZYX defined then Z
* @param pitch Angle around X unless B3_EULER_DEFAULT_ZYX defined then Y
* @param roll Angle around Z unless B3_EULER_DEFAULT_ZYX defined then X */
b3Quaternion(const b3Scalar& yaw, const b3Scalar& pitch, const b3Scalar& roll)
{
#ifndef BT_EULER_DEFAULT_ZYX
#ifndef B3_EULER_DEFAULT_ZYX
setEuler(yaw, pitch, roll);
#else
setEulerZYX(yaw, pitch, roll);
@@ -101,10 +101,10 @@ public:
void setRotation(const b3Vector3& axis, const b3Scalar& _angle)
{
b3Scalar d = axis.length();
btAssert(d != b3Scalar(0.0));
b3Scalar s = btSin(_angle * b3Scalar(0.5)) / d;
b3Assert(d != b3Scalar(0.0));
b3Scalar s = b3Sin(_angle * b3Scalar(0.5)) / d;
setValue(axis.getX() * s, axis.getY() * s, axis.getZ() * s,
btCos(_angle * b3Scalar(0.5)));
b3Cos(_angle * b3Scalar(0.5)));
}
/**@brief Set the quaternion using Euler angles
* @param yaw Angle around Y
@@ -115,12 +115,12 @@ public:
b3Scalar halfYaw = b3Scalar(yaw) * b3Scalar(0.5);
b3Scalar halfPitch = b3Scalar(pitch) * b3Scalar(0.5);
b3Scalar halfRoll = b3Scalar(roll) * b3Scalar(0.5);
b3Scalar cosYaw = btCos(halfYaw);
b3Scalar sinYaw = btSin(halfYaw);
b3Scalar cosPitch = btCos(halfPitch);
b3Scalar sinPitch = btSin(halfPitch);
b3Scalar cosRoll = btCos(halfRoll);
b3Scalar sinRoll = btSin(halfRoll);
b3Scalar cosYaw = b3Cos(halfYaw);
b3Scalar sinYaw = b3Sin(halfYaw);
b3Scalar cosPitch = b3Cos(halfPitch);
b3Scalar sinPitch = b3Sin(halfPitch);
b3Scalar cosRoll = b3Cos(halfRoll);
b3Scalar sinRoll = b3Sin(halfRoll);
setValue(cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw,
cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw,
sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw,
@@ -135,12 +135,12 @@ public:
b3Scalar halfYaw = b3Scalar(yaw) * b3Scalar(0.5);
b3Scalar halfPitch = b3Scalar(pitch) * b3Scalar(0.5);
b3Scalar halfRoll = b3Scalar(roll) * b3Scalar(0.5);
b3Scalar cosYaw = btCos(halfYaw);
b3Scalar sinYaw = btSin(halfYaw);
b3Scalar cosPitch = btCos(halfPitch);
b3Scalar sinPitch = btSin(halfPitch);
b3Scalar cosRoll = btCos(halfRoll);
b3Scalar sinRoll = btSin(halfRoll);
b3Scalar cosYaw = b3Cos(halfYaw);
b3Scalar sinYaw = b3Sin(halfYaw);
b3Scalar cosPitch = b3Cos(halfPitch);
b3Scalar sinPitch = b3Sin(halfPitch);
b3Scalar cosRoll = b3Cos(halfRoll);
b3Scalar sinRoll = b3Sin(halfRoll);
setValue(sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw, //x
cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw, //y
cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw, //z
@@ -150,9 +150,9 @@ public:
* @param q The quaternion to add to this one */
SIMD_FORCE_INLINE b3Quaternion& operator+=(const b3Quaternion& q)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
mVec128 = _mm_add_ps(mVec128, q.mVec128);
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
mVec128 = vaddq_f32(mVec128, q.mVec128);
#else
m_floats[0] += q.getX();
@@ -167,9 +167,9 @@ public:
* @param q The quaternion to subtract from this one */
b3Quaternion& operator-=(const b3Quaternion& q)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
mVec128 = _mm_sub_ps(mVec128, q.mVec128);
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
mVec128 = vsubq_f32(mVec128, q.mVec128);
#else
m_floats[0] -= q.getX();
@@ -184,11 +184,11 @@ public:
* @param s The scalar to scale by */
b3Quaternion& operator*=(const b3Scalar& s)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vs = _mm_load_ss(&s); // (S 0 0 0)
vs = bt_pshufd_ps(vs, 0); // (S S S S)
vs = b3_pshufd_ps(vs, 0); // (S S S S)
mVec128 = _mm_mul_ps(mVec128, vs);
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
mVec128 = vmulq_n_f32(mVec128, s);
#else
m_floats[0] *= s;
@@ -204,25 +204,25 @@ public:
* Equivilant to this = this * q */
b3Quaternion& operator*=(const b3Quaternion& q)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vQ2 = q.get128();

__m128 A1 = bt_pshufd_ps(mVec128, BT_SHUFFLE(0,1,2,0));
__m128 B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(3,3,3,0));
__m128 A1 = b3_pshufd_ps(mVec128, B3_SHUFFLE(0,1,2,0));
__m128 B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(3,3,3,0));

A1 = A1 * B1;

__m128 A2 = bt_pshufd_ps(mVec128, BT_SHUFFLE(1,2,0,1));
__m128 B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1));
__m128 A2 = b3_pshufd_ps(mVec128, B3_SHUFFLE(1,2,0,1));
__m128 B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2,0,1,1));

A2 = A2 * B2;

B1 = bt_pshufd_ps(mVec128, BT_SHUFFLE(2,0,1,2));
B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2));
B1 = b3_pshufd_ps(mVec128, B3_SHUFFLE(2,0,1,2));
B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1,2,0,2));

B1 = B1 * B2; // A3 *= B3

mVec128 = bt_splat_ps(mVec128, 3); // A0
mVec128 = b3_splat_ps(mVec128, 3); // A0
mVec128 = mVec128 * vQ2; // A0 * B0

A1 = A1 + A2; // AB12
@@ -230,7 +230,7 @@ public:
A1 = _mm_xor_ps(A1, vPPPM); // change sign of the last element
mVec128 = mVec128+ A1; // AB03 + AB12

#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)

float32x4_t vQ1 = mVec128;
float32x4_t vQ2 = q.get128();
@@ -270,7 +270,7 @@ public:
A0 = vsubq_f32(A0, A3); // AB03 = AB0 - AB3

// change the sign of the last element
A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
A0 = vaddq_f32(A0, A1); // AB03 + AB12

mVec128 = A0;
@@ -287,7 +287,7 @@ public:
* @param q The other quaternion */
b3Scalar dot(const b3Quaternion& q) const
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vd;

vd = _mm_mul_ps(mVec128, q.mVec128);
@@ -298,7 +298,7 @@ public:
vd = _mm_add_ss(vd, t);

return _mm_cvtss_f32(vd);
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
float32x4_t vd = vmulq_f32(mVec128, q.mVec128);
float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_high_f32(vd));
x = vpadd_f32(x, x);
@@ -320,14 +320,14 @@ public:
/**@brief Return the length of the quaternion */
b3Scalar length() const
{
return btSqrt(length2());
return b3Sqrt(length2());
}

/**@brief Normalize the quaternion
* Such that x^2 + y^2 + z^2 +w^2 = 1 */
b3Quaternion& normalize()
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vd;

vd = _mm_mul_ps(mVec128, mVec128);
@@ -339,7 +339,7 @@ public:

vd = _mm_sqrt_ss(vd);
vd = _mm_div_ss(vOnes, vd);
vd = bt_pshufd_ps(vd, 0); // splat
vd = b3_pshufd_ps(vd, 0); // splat
mVec128 = _mm_mul_ps(mVec128, vd);

return *this;
@@ -353,12 +353,12 @@ public:
SIMD_FORCE_INLINE b3Quaternion
operator*(const b3Scalar& s) const
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vs = _mm_load_ss(&s); // (S 0 0 0)
vs = bt_pshufd_ps(vs, 0x00); // (S S S S)
vs = b3_pshufd_ps(vs, 0x00); // (S S S S)

return b3Quaternion(_mm_mul_ps(mVec128, vs));
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
return b3Quaternion(vmulq_n_f32(mVec128, s));
#else
return b3Quaternion(getX() * s, getY() * s, getZ() * s, m_floats[3] * s);
@@ -369,7 +369,7 @@ public:
* @param s The inverse scale factor */
b3Quaternion operator/(const b3Scalar& s) const
{
btAssert(s != b3Scalar(0.0));
b3Assert(s != b3Scalar(0.0));
return *this * (b3Scalar(1.0) / s);
}

@@ -377,7 +377,7 @@ public:
* @param s The scale factor */
b3Quaternion& operator/=(const b3Scalar& s)
{
btAssert(s != b3Scalar(0.0));
b3Assert(s != b3Scalar(0.0));
return *this *= b3Scalar(1.0) / s;
}

@@ -390,14 +390,14 @@ public:
* @param q The other quaternion */
b3Scalar angle(const b3Quaternion& q) const
{
b3Scalar s = btSqrt(length2() * q.length2());
btAssert(s != b3Scalar(0.0));
return btAcos(dot(q) / s);
b3Scalar s = b3Sqrt(length2() * q.length2());
b3Assert(s != b3Scalar(0.0));
return b3Acos(dot(q) / s);
}
/**@brief Return the angle of rotation represented by this quaternion */
b3Scalar getAngle() const
{
b3Scalar s = b3Scalar(2.) * btAcos(m_floats[3]);
b3Scalar s = b3Scalar(2.) * b3Acos(m_floats[3]);
return s;
}

@@ -408,17 +408,17 @@ public:

if (s_squared < b3Scalar(10.) * SIMD_EPSILON) //Check for divide by zero
return b3Vector3(1.0, 0.0, 0.0); // Arbitrary
b3Scalar s = 1.f/btSqrt(s_squared);
b3Scalar s = 1.f/b3Sqrt(s_squared);
return b3Vector3(m_floats[0] * s, m_floats[1] * s, m_floats[2] * s);
}

/**@brief Return the inverse of this quaternion */
b3Quaternion inverse() const
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
return b3Quaternion(_mm_xor_ps(mVec128, vQInv));
#elif defined(BT_USE_NEON)
return b3Quaternion((btSimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)vQInv));
#elif defined(B3_USE_NEON)
return b3Quaternion((b3SimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)vQInv));
#else
return b3Quaternion(-m_floats[0], -m_floats[1], -m_floats[2], m_floats[3]);
#endif
@@ -429,9 +429,9 @@ public:
SIMD_FORCE_INLINE b3Quaternion
operator+(const b3Quaternion& q2) const
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
return b3Quaternion(_mm_add_ps(mVec128, q2.mVec128));
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
return b3Quaternion(vaddq_f32(mVec128, q2.mVec128));
#else
const b3Quaternion& q1 = *this;
@@ -444,9 +444,9 @@ public:
SIMD_FORCE_INLINE b3Quaternion
operator-(const b3Quaternion& q2) const
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
return b3Quaternion(_mm_sub_ps(mVec128, q2.mVec128));
#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)
return b3Quaternion(vsubq_f32(mVec128, q2.mVec128));
#else
const b3Quaternion& q1 = *this;
@@ -458,10 +458,10 @@ public:
* This simply negates each element */
SIMD_FORCE_INLINE b3Quaternion operator-() const
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
return b3Quaternion(_mm_xor_ps(mVec128, btvMzeroMask));
#elif defined(BT_USE_NEON)
return b3Quaternion((btSimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)btvMzeroMask) );
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
return b3Quaternion(_mm_xor_ps(mVec128, b3vMzeroMask));
#elif defined(B3_USE_NEON)
return b3Quaternion((b3SimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)b3vMzeroMask) );
#else
const b3Quaternion& q2 = *this;
return b3Quaternion( - q2.getX(), - q2.getY(), - q2.getZ(), - q2.m_floats[3]);
@@ -496,19 +496,19 @@ public:
* Slerp interpolates assuming constant velocity. */
b3Quaternion slerp(const b3Quaternion& q, const b3Scalar& t) const
{
b3Scalar magnitude = btSqrt(length2() * q.length2());
btAssert(magnitude > b3Scalar(0));
b3Scalar magnitude = b3Sqrt(length2() * q.length2());
b3Assert(magnitude > b3Scalar(0));

b3Scalar product = dot(q) / magnitude;
if (btFabs(product) < b3Scalar(1))
if (b3Fabs(product) < b3Scalar(1))
{
// Take care of long angle case see http://en.wikipedia.org/wiki/Slerp
const b3Scalar sign = (product < 0) ? b3Scalar(-1) : b3Scalar(1);

const b3Scalar theta = btAcos(sign * product);
const b3Scalar s1 = btSin(sign * t * theta);
const b3Scalar d = b3Scalar(1.0) / btSin(theta);
const b3Scalar s0 = btSin((b3Scalar(1.0) - t) * theta);
const b3Scalar theta = b3Acos(sign * product);
const b3Scalar s1 = b3Sin(sign * t * theta);
const b3Scalar d = b3Scalar(1.0) / b3Sin(theta);
const b3Scalar s0 = b3Sin((b3Scalar(1.0) - t) * theta);

return b3Quaternion(
(m_floats[0] * s0 + q.getX() * s1) * d,
@@ -541,27 +541,27 @@ public:
SIMD_FORCE_INLINE b3Quaternion
operator*(const b3Quaternion& q1, const b3Quaternion& q2)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vQ1 = q1.get128();
__m128 vQ2 = q2.get128();
__m128 A0, A1, B1, A2, B2;

A1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(0,1,2,0)); // X Y z x // vtrn
B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(3,3,3,0)); // W W W X // vdup vext
A1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(0,1,2,0)); // X Y z x // vtrn
B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(3,3,3,0)); // W W W X // vdup vext

A1 = A1 * B1;

A2 = bt_pshufd_ps(vQ1, BT_SHUFFLE(1,2,0,1)); // Y Z X Y // vext
B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1)); // z x Y Y // vtrn vdup
A2 = b3_pshufd_ps(vQ1, B3_SHUFFLE(1,2,0,1)); // Y Z X Y // vext
B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2,0,1,1)); // z x Y Y // vtrn vdup

A2 = A2 * B2;

B1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(2,0,1,2)); // z x Y Z // vtrn vext
B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2)); // Y Z x z // vext vtrn
B1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(2,0,1,2)); // z x Y Z // vtrn vext
B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1,2,0,2)); // Y Z x z // vext vtrn

B1 = B1 * B2; // A3 *= B3

A0 = bt_splat_ps(vQ1, 3); // A0
A0 = b3_splat_ps(vQ1, 3); // A0
A0 = A0 * vQ2; // A0 * B0

A1 = A1 + A2; // AB12
@@ -572,7 +572,7 @@ operator*(const b3Quaternion& q1, const b3Quaternion& q2)

return b3Quaternion(A0);

#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)

float32x4_t vQ1 = q1.get128();
float32x4_t vQ2 = q2.get128();
@@ -612,7 +612,7 @@ operator*(const b3Quaternion& q1, const b3Quaternion& q2)
A0 = vsubq_f32(A0, A3); // AB03 = AB0 - AB3

// change the sign of the last element
A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
A0 = vaddq_f32(A0, A1); // AB03 + AB12

return b3Quaternion(A0);
@@ -629,23 +629,23 @@ operator*(const b3Quaternion& q1, const b3Quaternion& q2)
SIMD_FORCE_INLINE b3Quaternion
operator*(const b3Quaternion& q, const b3Vector3& w)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vQ1 = q.get128();
__m128 vQ2 = w.get128();
__m128 A1, B1, A2, B2, A3, B3;

A1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(3,3,3,0));
B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(0,1,2,0));
A1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(3,3,3,0));
B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(0,1,2,0));

A1 = A1 * B1;

A2 = bt_pshufd_ps(vQ1, BT_SHUFFLE(1,2,0,1));
B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1));
A2 = b3_pshufd_ps(vQ1, B3_SHUFFLE(1,2,0,1));
B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2,0,1,1));

A2 = A2 * B2;

A3 = bt_pshufd_ps(vQ1, BT_SHUFFLE(2,0,1,2));
B3 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2));
A3 = b3_pshufd_ps(vQ1, B3_SHUFFLE(2,0,1,2));
B3 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1,2,0,2));

A3 = A3 * B3; // A3 *= B3

@@ -655,7 +655,7 @@ operator*(const b3Quaternion& q, const b3Vector3& w)

return b3Quaternion(A1);

#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)

float32x4_t vQ1 = q.get128();
float32x4_t vQ2 = w.get128();
@@ -694,7 +694,7 @@ operator*(const b3Quaternion& q, const b3Vector3& w)
A1 = vaddq_f32(A1, A2); // AB12 = AB1 + AB2

// change the sign of the last element
A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);

A1 = vsubq_f32(A1, A3); // AB123 = AB12 - AB3

@@ -712,23 +712,23 @@ operator*(const b3Quaternion& q, const b3Vector3& w)
SIMD_FORCE_INLINE b3Quaternion
operator*(const b3Vector3& w, const b3Quaternion& q)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
__m128 vQ1 = w.get128();
__m128 vQ2 = q.get128();
__m128 A1, B1, A2, B2, A3, B3;

A1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(0,1,2,0)); // X Y z x
B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(3,3,3,0)); // W W W X
A1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(0,1,2,0)); // X Y z x
B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(3,3,3,0)); // W W W X

A1 = A1 * B1;

A2 = bt_pshufd_ps(vQ1, BT_SHUFFLE(1,2,0,1));
B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1));
A2 = b3_pshufd_ps(vQ1, B3_SHUFFLE(1,2,0,1));
B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2,0,1,1));

A2 = A2 *B2;

A3 = bt_pshufd_ps(vQ1, BT_SHUFFLE(2,0,1,2));
B3 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2));
A3 = b3_pshufd_ps(vQ1, B3_SHUFFLE(2,0,1,2));
B3 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1,2,0,2));

A3 = A3 * B3; // A3 *= B3

@@ -738,7 +738,7 @@ operator*(const b3Vector3& w, const b3Quaternion& q)

return b3Quaternion(A1);

#elif defined(BT_USE_NEON)
#elif defined(B3_USE_NEON)

float32x4_t vQ1 = w.get128();
float32x4_t vQ2 = q.get128();
@@ -777,7 +777,7 @@ operator*(const b3Vector3& w, const b3Quaternion& q)
A1 = vaddq_f32(A1, A2); // AB12 = AB1 + AB2

// change the sign of the last element
A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);

A1 = vsubq_f32(A1, A3); // AB123 = AB12 - AB3

@@ -809,7 +809,7 @@ length(const b3Quaternion& q)

/**@brief Return the angle between two quaternions*/
SIMD_FORCE_INLINE b3Scalar
btAngle(const b3Quaternion& q1, const b3Quaternion& q2)
b3Angle(const b3Quaternion& q1, const b3Quaternion& q2)
{
return q1.angle(q2);
}
@@ -837,10 +837,10 @@ quatRotate(const b3Quaternion& rotation, const b3Vector3& v)
{
b3Quaternion q = rotation * v;
q *= rotation.inverse();
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
return b3Vector3(_mm_and_ps(q.get128(), btvFFF0fMask));
#elif defined(BT_USE_NEON)
return b3Vector3((float32x4_t)vandq_s32((int32x4_t)q.get128(), btvFFF0Mask));
#if defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
return b3Vector3(_mm_and_ps(q.get128(), b3vFFF0fMask));
#elif defined(B3_USE_NEON)
return b3Vector3((float32x4_t)vandq_s32((int32x4_t)q.get128(), b3vFFF0Mask));
#else
return b3Vector3(q.getX(),q.getY(),q.getZ());
#endif
@@ -855,11 +855,11 @@ shortestArcQuat(const b3Vector3& v0, const b3Vector3& v1) // Game Programming Ge
if (d < -1.0 + SIMD_EPSILON)
{
b3Vector3 n,unused;
btPlaneSpace1(v0,n,unused);
b3PlaneSpace1(v0,n,unused);
return b3Quaternion(n.getX(),n.getY(),n.getZ(),0.0f); // just pick any vector that is orthogonal to v0
}

b3Scalar s = btSqrt((1.0f + d) * 2.0f);
b3Scalar s = b3Sqrt((1.0f + d) * 2.0f);
b3Scalar rs = 1.0f / s;

return b3Quaternion(c.getX()*rs,c.getY()*rs,c.getZ()*rs,s * 0.5f);
@@ -873,7 +873,7 @@ shortestArcQuatNormalize2(b3Vector3& v0,b3Vector3& v1)
return shortestArcQuat(v0,v1);
}

#endif //BT_SIMD__QUATERNION_H_
#endif //B3_SIMD__QUATERNION_H_


@@ -15,10 +15,10 @@

#include "b3Quickprof.h"

#ifndef BT_NO_PROFILE
#ifndef B3_NO_PROFILE


static btClock gProfileClock;
static b3Clock gProfileClock;


#ifdef __CELLOS_LV2__
@@ -33,7 +33,7 @@ static btClock gProfileClock;

#if defined(WIN32) || defined(_WIN32)

#define BT_USE_WINDOWS_TIMERS
#define B3_USE_WINDOWS_TIMERS
#define WIN32_LEAN_AND_MEAN
#define NOWINRES
#define NOMCX
@@ -54,10 +54,10 @@ static btClock gProfileClock;

#define mymin(a,b) (a > b ? a : b)

struct btClockData
struct b3ClockData
{

#ifdef BT_USE_WINDOWS_TIMERS
#ifdef B3_USE_WINDOWS_TIMERS
LARGE_INTEGER mClockFrequency;
DWORD mStartTick;
LONGLONG mPrevElapsedTime;
@@ -72,28 +72,28 @@ struct btClockData

};

///The btClock is a portable basic clock that measures accurate time in seconds, use for profiling.
btClock::btClock()
///The b3Clock is a portable basic clock that measures accurate time in seconds, use for profiling.
b3Clock::b3Clock()
{
m_data = new btClockData;
#ifdef BT_USE_WINDOWS_TIMERS
m_data = new b3ClockData;
#ifdef B3_USE_WINDOWS_TIMERS
QueryPerformanceFrequency(&m_data->mClockFrequency);
#endif
reset();
}

btClock::~btClock()
b3Clock::~b3Clock()
{
delete m_data;
}

btClock::btClock(const btClock& other)
b3Clock::b3Clock(const b3Clock& other)
{
m_data = new btClockData;
m_data = new b3ClockData;
*m_data = *other.m_data;
}

btClock& btClock::operator=(const btClock& other)
b3Clock& b3Clock::operator=(const b3Clock& other)
{
*m_data = *other.m_data;
return *this;
@@ -101,9 +101,9 @@ btClock& btClock::operator=(const btClock& other)


/// Resets the initial reference time.
void btClock::reset()
void b3Clock::reset()
{
#ifdef BT_USE_WINDOWS_TIMERS
#ifdef B3_USE_WINDOWS_TIMERS
QueryPerformanceCounter(&m_data->mStartTime);
m_data->mStartTick = GetTickCount();
m_data->mPrevElapsedTime = 0;
@@ -122,10 +122,10 @@ void btClock::reset()
}

/// Returns the time in ms since the last call to reset or since
/// the btClock was created.
unsigned long int btClock::getTimeMilliseconds()
/// the b3Clock was created.
unsigned long int b3Clock::getTimeMilliseconds()
{
#ifdef BT_USE_WINDOWS_TIMERS
#ifdef B3_USE_WINDOWS_TIMERS
LARGE_INTEGER currentTime;
QueryPerformanceCounter(&currentTime);
LONGLONG elapsedTime = currentTime.QuadPart -
@@ -179,9 +179,9 @@ unsigned long int btClock::getTimeMilliseconds()

/// Returns the time in us since the last call to reset or since
/// the Clock was created.
unsigned long int btClock::getTimeMicroseconds()
unsigned long int b3Clock::getTimeMicroseconds()
{
#ifdef BT_USE_WINDOWS_TIMERS
#ifdef B3_USE_WINDOWS_TIMERS
LARGE_INTEGER currentTime;
QueryPerformanceCounter(&currentTime);
LONGLONG elapsedTime = currentTime.QuadPart -
@@ -563,4 +563,4 @@ void CProfileManager::dumpAll()


#endif //BT_NO_PROFILE
#endif //B3_NO_PROFILE

@@ -12,12 +12,12 @@


#ifndef BT_QUICK_PROF_H
#define BT_QUICK_PROF_H
#ifndef B3_QUICK_PROF_H
#define B3_QUICK_PROF_H

//To disable built-in profiling, please comment out next line
//#define BT_NO_PROFILE 1
#ifndef BT_NO_PROFILE
//#define B3_NO_PROFILE 1
#ifndef B3_NO_PROFILE
#include <stdio.h>//@todo remove this, backwards compatibility
#include "b3Scalar.h"
#include "b3AlignedAllocator.h"
@@ -31,29 +31,29 @@

#ifdef USE_BT_CLOCK

///The btClock is a portable basic clock that measures accurate time in seconds, use for profiling.
class btClock
///The b3Clock is a portable basic clock that measures accurate time in seconds, use for profiling.
class b3Clock
{
public:
btClock();
b3Clock();

btClock(const btClock& other);
btClock& operator=(const btClock& other);
b3Clock(const b3Clock& other);
b3Clock& operator=(const b3Clock& other);

~btClock();
~b3Clock();

/// Resets the initial reference time.
void reset();

/// Returns the time in ms since the last call to reset or since
/// the btClock was created.
/// the b3Clock was created.
unsigned long int getTimeMilliseconds();

/// Returns the time in us since the last call to reset or since
/// the Clock was created.
unsigned long int getTimeMicroseconds();
private:
struct btClockData* m_data;
struct b3ClockData* m_data;
};

#endif //USE_BT_CLOCK
@@ -173,7 +173,7 @@ private:


///ProfileSampleClass is a simple way to profile a function's scope
///Use the BT_PROFILE macro at the start of scope to time
///Use the B3_PROFILE macro at the start of scope to time
class CProfileSample {
public:
CProfileSample( const char * name )
@@ -188,16 +188,16 @@ public:
};


#define BT_PROFILE( name ) CProfileSample __profile( name )
#define B3_PROFILE( name ) CProfileSample __profile( name )

#else

#define BT_PROFILE( name )
#define B3_PROFILE( name )

#endif //#ifndef BT_NO_PROFILE
#endif //#ifndef B3_NO_PROFILE


#endif //BT_QUICK_PROF_H
#endif //B3_QUICK_PROF_H


@@ -14,8 +14,8 @@ subject to the following restrictions:


#ifndef BT_GEN_RANDOM_H
#define BT_GEN_RANDOM_H
#ifndef B3_GEN_RANDOM_H
#define B3_GEN_RANDOM_H

#ifdef MT19937

@@ -38,5 +38,5 @@ SIMD_FORCE_INLINE unsigned int GEN_rand() { return rand(); }

#endif

#endif //BT_GEN_RANDOM_H
#endif //B3_GEN_RANDOM_H


@@ -14,10 +14,10 @@ subject to the following restrictions:


#ifndef BT_SCALAR_H
#define BT_SCALAR_H
#ifndef B3_SCALAR_H
#define B3_SCALAR_H

#ifdef BT_MANAGED_CODE
#ifdef B3_MANAGED_CODE
//Aligned data types not supported in managed code
#pragma unmanaged
#endif
@@ -28,15 +28,15 @@ subject to the following restrictions:
#include <float.h>

/* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/
#define BT_BULLET_VERSION 281
#define B3_BULLET_VERSION 281

inline int btGetVersion()
inline int b3GetVersion()
{
return BT_BULLET_VERSION;
return B3_BULLET_VERSION;
}

#if defined(DEBUG) || defined (_DEBUG)
#define BT_DEBUG
#define B3_DEBUG
#endif


@@ -49,7 +49,7 @@ inline int btGetVersion()
#define ATTRIBUTE_ALIGNED64(a) a
#define ATTRIBUTE_ALIGNED128(a) a
#else
//#define BT_HAS_ALIGNED_ALLOCATOR
//#define B3_HAS_ALIGNED_ALLOCATOR
#pragma warning(disable : 4324) // disable padding warning
// #pragma warning(disable:4530) // Disable the exception disable but used in MSCV Stl warning.
// #pragma warning(disable:4996) //Turn off warnings about deprecated C routines
@@ -60,24 +60,24 @@ inline int btGetVersion()
#define ATTRIBUTE_ALIGNED64(a) __declspec(align(64)) a
#define ATTRIBUTE_ALIGNED128(a) __declspec (align(128)) a
#ifdef _XBOX
#define BT_USE_VMX128
#define B3_USE_VMX128

#include <ppcintrinsics.h>
#define BT_HAVE_NATIVE_FSEL
#define btFsel(a,b,c) __fsel((a),(b),(c))
#define B3_HAVE_NATIVE_FSEL
#define b3Fsel(a,b,c) __fsel((a),(b),(c))
#else

#if (defined (_WIN32) && (_MSC_VER) && _MSC_VER >= 1400) && (!defined (BT_USE_DOUBLE_PRECISION))
#define BT_USE_SSE
#ifdef BT_USE_SSE
//BT_USE_SSE_IN_API is disabled under Windows by default, because
#if (defined (_WIN32) && (_MSC_VER) && _MSC_VER >= 1400) && (!defined (B3_USE_DOUBLE_PRECISION))
#define B3_USE_SSE
#ifdef B3_USE_SSE
//B3_USE_SSE_IN_API is disabled under Windows by default, because
//it makes it harder to integrate Bullet into your application under Windows
//(structured embedding Bullet structs/classes need to be 16-byte aligned)
//with relatively little performance gain
//If you are not embedded Bullet data in your classes, or make sure that you align those classes on 16-byte boundaries
//you can manually enable this line or set it in the build system for a bit of performance gain (a few percent, dependent on usage)
//#define BT_USE_SSE_IN_API
#endif //BT_USE_SSE
//#define B3_USE_SSE_IN_API
#endif //B3_USE_SSE
#include <emmintrin.h>
#endif

@@ -85,22 +85,22 @@ inline int btGetVersion()

#endif //__MINGW32__

#ifdef BT_DEBUG
#ifdef B3_DEBUG
#ifdef _MSC_VER
#include <stdio.h>
#define btAssert(x) { if(!(x)){printf("Assert "__FILE__ ":%u ("#x")\n", __LINE__);__debugbreak(); }}
#define b3Assert(x) { if(!(x)){printf("Assert "__FILE__ ":%u ("#x")\n", __LINE__);__debugbreak(); }}
#else//_MSC_VER
#include <assert.h>
#define btAssert assert
#define b3Assert assert
#endif//_MSC_VER
#else
#define btAssert(x)
#define b3Assert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
//b3FullAssert is optional, slows down a lot
#define b3FullAssert(x)

#define btLikely(_c) _c
#define btUnlikely(_c) _c
#define b3Likely(_c) _c
#define b3Unlikely(_c) _c

#else

@@ -112,23 +112,23 @@ inline int btGetVersion()
#ifndef assert
#include <assert.h>
#endif
#ifdef BT_DEBUG
#ifdef B3_DEBUG
#ifdef __SPU__
#include <spu_printf.h>
#define printf spu_printf
#define btAssert(x) {if(!(x)){printf("Assert "__FILE__ ":%u ("#x")\n", __LINE__);spu_hcmpeq(0,0);}}
#define b3Assert(x) {if(!(x)){printf("Assert "__FILE__ ":%u ("#x")\n", __LINE__);spu_hcmpeq(0,0);}}
#else
#define btAssert assert
#define b3Assert assert
#endif

#else
#define btAssert(x)
#define b3Assert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
//b3FullAssert is optional, slows down a lot
#define b3FullAssert(x)

#define btLikely(_c) _c
#define btUnlikely(_c) _c
#define b3Likely(_c) _c
#define b3Unlikely(_c) _c

#else

@@ -141,29 +141,29 @@ inline int btGetVersion()
#ifndef assert
#include <assert.h>
#endif
#ifdef BT_DEBUG
#define btAssert assert
#ifdef B3_DEBUG
#define b3Assert assert
#else
#define btAssert(x)
#define b3Assert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
//b3FullAssert is optional, slows down a lot
#define b3FullAssert(x)


#define btLikely(_c) __builtin_expect((_c), 1)
#define btUnlikely(_c) __builtin_expect((_c), 0)
#define b3Likely(_c) __builtin_expect((_c), 1)
#define b3Unlikely(_c) __builtin_expect((_c), 0)


#else
//non-windows systems

#if (defined (__APPLE__) && (!defined (BT_USE_DOUBLE_PRECISION)))
#if (defined (__APPLE__) && (!defined (B3_USE_DOUBLE_PRECISION)))
#if defined (__i386__) || defined (__x86_64__)
#define BT_USE_SSE
//BT_USE_SSE_IN_API is enabled on Mac OSX by default, because memory is automatically aligned on 16-byte boundaries
#define B3_USE_SSE
//B3_USE_SSE_IN_API is enabled on Mac OSX by default, because memory is automatically aligned on 16-byte boundaries
//if apps run into issues, we will disable the next line
#define BT_USE_SSE_IN_API
#ifdef BT_USE_SSE
#define B3_USE_SSE_IN_API
#ifdef B3_USE_SSE
// include appropriate SSE level
#if defined (__SSE4_1__)
#include <smmintrin.h>
@@ -174,14 +174,14 @@ inline int btGetVersion()
#else
#include <emmintrin.h>
#endif
#endif //BT_USE_SSE
#endif //B3_USE_SSE
#elif defined( __armv7__ )
#ifdef __clang__
#define BT_USE_NEON 1
#define B3_USE_NEON 1

#if defined BT_USE_NEON && defined (__clang__)
#if defined B3_USE_NEON && defined (__clang__)
#include <arm_neon.h>
#endif//BT_USE_NEON
#endif//B3_USE_NEON
#endif //__clang__
#endif//__arm__

@@ -197,7 +197,7 @@ inline int btGetVersion()
#if defined(DEBUG) || defined (_DEBUG)
#if defined (__i386__) || defined (__x86_64__)
#include <stdio.h>
#define btAssert(x)\
#define b3Assert(x)\
{\
if(!(x))\
{\
@@ -206,16 +206,16 @@ inline int btGetVersion()
}\
}
#else//defined (__i386__) || defined (__x86_64__)
#define btAssert assert
#define b3Assert assert
#endif//defined (__i386__) || defined (__x86_64__)
#else//defined(DEBUG) || defined (_DEBUG)
#define btAssert(x)
#define b3Assert(x)
#endif//defined(DEBUG) || defined (_DEBUG)

//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
#define btLikely(_c) _c
#define btUnlikely(_c) _c
//b3FullAssert is optional, slows down a lot
#define b3FullAssert(x)
#define b3Likely(_c) _c
#define b3Unlikely(_c) _c

#else

@@ -232,15 +232,15 @@ inline int btGetVersion()
#endif

#if defined(DEBUG) || defined (_DEBUG)
#define btAssert assert
#define b3Assert assert
#else
#define btAssert(x)
#define b3Assert(x)
#endif

//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
#define btLikely(_c) _c
#define btUnlikely(_c) _c
//b3FullAssert is optional, slows down a lot
#define b3FullAssert(x)
#define b3Likely(_c) _c
#define b3Unlikely(_c) _c
#endif //__APPLE__

#endif // LIBSPE2
@@ -250,31 +250,31 @@ inline int btGetVersion()


///The b3Scalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
#if defined(BT_USE_DOUBLE_PRECISION)
#if defined(B3_USE_DOUBLE_PRECISION)
typedef double b3Scalar;
//this number could be bigger in double precision
#define BT_LARGE_FLOAT 1e30
#define B3_LARGE_FLOAT 1e30
#else
typedef float b3Scalar;
//keep BT_LARGE_FLOAT*BT_LARGE_FLOAT < FLT_MAX
#define BT_LARGE_FLOAT 1e18f
//keep B3_LARGE_FLOAT*B3_LARGE_FLOAT < FLT_MAX
#define B3_LARGE_FLOAT 1e18f
#endif

#ifdef BT_USE_SSE
typedef __m128 btSimdFloat4;
#endif//BT_USE_SSE
#ifdef B3_USE_SSE
typedef __m128 b3SimdFloat4;
#endif//B3_USE_SSE

#if defined BT_USE_SSE_IN_API && defined (BT_USE_SSE)
#if defined B3_USE_SSE_IN_API && defined (B3_USE_SSE)
#ifdef _WIN32

#ifndef BT_NAN
static int btNanMask = 0x7F800001;
#define BT_NAN (*(float*)&btNanMask)
#ifndef B3_NAN
static int b3NanMask = 0x7F800001;
#define B3_NAN (*(float*)&b3NanMask)
#endif

#ifndef BT_INFINITY
static int btInfinityMask = 0x7F800000;
#define BT_INFINITY (*(float*)&btInfinityMask)
#ifndef B3_INFINITY
static int b3InfinityMask = 0x7F800000;
#define B3_INFINITY (*(float*)&b3InfinityMask)
#endif

inline __m128 operator + (const __m128 A, const __m128 B)
@@ -292,70 +292,70 @@ inline __m128 operator * (const __m128 A, const __m128 B)
return _mm_mul_ps(A, B);
}

#define btCastfTo128i(a) (_mm_castps_si128(a))
#define btCastfTo128d(a) (_mm_castps_pd(a))
#define btCastiTo128f(a) (_mm_castsi128_ps(a))
#define btCastdTo128f(a) (_mm_castpd_ps(a))
#define btCastdTo128i(a) (_mm_castpd_si128(a))
#define btAssign128(r0,r1,r2,r3) _mm_setr_ps(r0,r1,r2,r3)
#define b3CastfTo128i(a) (_mm_castps_si128(a))
#define b3CastfTo128d(a) (_mm_castps_pd(a))
#define b3CastiTo128f(a) (_mm_castsi128_ps(a))
#define b3CastdTo128f(a) (_mm_castpd_ps(a))
#define b3CastdTo128i(a) (_mm_castpd_si128(a))
#define b3Assign128(r0,r1,r2,r3) _mm_setr_ps(r0,r1,r2,r3)

#else//_WIN32

#define btCastfTo128i(a) ((__m128i)(a))
#define btCastfTo128d(a) ((__m128d)(a))
#define btCastiTo128f(a) ((__m128) (a))
#define btCastdTo128f(a) ((__m128) (a))
#define btCastdTo128i(a) ((__m128i)(a))
#define btAssign128(r0,r1,r2,r3) (__m128){r0,r1,r2,r3}
#define BT_INFINITY INFINITY
#define BT_NAN NAN
#define b3CastfTo128i(a) ((__m128i)(a))
#define b3CastfTo128d(a) ((__m128d)(a))
#define b3CastiTo128f(a) ((__m128) (a))
#define b3CastdTo128f(a) ((__m128) (a))
#define b3CastdTo128i(a) ((__m128i)(a))
#define b3Assign128(r0,r1,r2,r3) (__m128){r0,r1,r2,r3}
#define B3_INFINITY INFINITY
#define B3_NAN NAN
#endif//_WIN32
#endif //BT_USE_SSE_IN_API
#endif //B3_USE_SSE_IN_API

#ifdef BT_USE_NEON
#ifdef B3_USE_NEON
#include <arm_neon.h>

typedef float32x4_t btSimdFloat4;
#define BT_INFINITY INFINITY
#define BT_NAN NAN
#define btAssign128(r0,r1,r2,r3) (float32x4_t){r0,r1,r2,r3}
typedef float32x4_t b3SimdFloat4;
#define B3_INFINITY INFINITY
#define B3_NAN NAN
#define b3Assign128(r0,r1,r2,r3) (float32x4_t){r0,r1,r2,r3}
#endif


#define BT_DECLARE_ALIGNED_ALLOCATOR() \
SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes,16); } \
SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \
#define B3_DECLARE_ALIGNED_ALLOCATOR() \
SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return b3AlignedAlloc(sizeInBytes,16); } \
SIMD_FORCE_INLINE void operator delete(void* ptr) { b3AlignedFree(ptr); } \
SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \
SIMD_FORCE_INLINE void operator delete(void*, void*) { } \
SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes,16); } \
SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \
SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return b3AlignedAlloc(sizeInBytes,16); } \
SIMD_FORCE_INLINE void operator delete[](void* ptr) { b3AlignedFree(ptr); } \
SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \
SIMD_FORCE_INLINE void operator delete[](void*, void*) { } \


#if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS)
#if defined(B3_USE_DOUBLE_PRECISION) || defined(B3_FORCE_DOUBLE_FUNCTIONS)

SIMD_FORCE_INLINE b3Scalar btSqrt(b3Scalar x) { return sqrt(x); }
SIMD_FORCE_INLINE b3Scalar btFabs(b3Scalar x) { return fabs(x); }
SIMD_FORCE_INLINE b3Scalar btCos(b3Scalar x) { return cos(x); }
SIMD_FORCE_INLINE b3Scalar btSin(b3Scalar x) { return sin(x); }
SIMD_FORCE_INLINE b3Scalar btTan(b3Scalar x) { return tan(x); }
SIMD_FORCE_INLINE b3Scalar btAcos(b3Scalar x) { if (x<b3Scalar(-1)) x=b3Scalar(-1); if (x>b3Scalar(1)) x=b3Scalar(1); return acos(x); }
SIMD_FORCE_INLINE b3Scalar btAsin(b3Scalar x) { if (x<b3Scalar(-1)) x=b3Scalar(-1); if (x>b3Scalar(1)) x=b3Scalar(1); return asin(x); }
SIMD_FORCE_INLINE b3Scalar btAtan(b3Scalar x) { return atan(x); }
SIMD_FORCE_INLINE b3Scalar btAtan2(b3Scalar x, b3Scalar y) { return atan2(x, y); }
SIMD_FORCE_INLINE b3Scalar btExp(b3Scalar x) { return exp(x); }
SIMD_FORCE_INLINE b3Scalar btLog(b3Scalar x) { return log(x); }
SIMD_FORCE_INLINE b3Scalar btPow(b3Scalar x,b3Scalar y) { return pow(x,y); }
SIMD_FORCE_INLINE b3Scalar btFmod(b3Scalar x,b3Scalar y) { return fmod(x,y); }
SIMD_FORCE_INLINE b3Scalar b3Sqrt(b3Scalar x) { return sqrt(x); }
SIMD_FORCE_INLINE b3Scalar b3Fabs(b3Scalar x) { return fabs(x); }
SIMD_FORCE_INLINE b3Scalar b3Cos(b3Scalar x) { return cos(x); }
SIMD_FORCE_INLINE b3Scalar b3Sin(b3Scalar x) { return sin(x); }
SIMD_FORCE_INLINE b3Scalar b3Tan(b3Scalar x) { return tan(x); }
SIMD_FORCE_INLINE b3Scalar b3Acos(b3Scalar x) { if (x<b3Scalar(-1)) x=b3Scalar(-1); if (x>b3Scalar(1)) x=b3Scalar(1); return acos(x); }
SIMD_FORCE_INLINE b3Scalar b3Asin(b3Scalar x) { if (x<b3Scalar(-1)) x=b3Scalar(-1); if (x>b3Scalar(1)) x=b3Scalar(1); return asin(x); }
SIMD_FORCE_INLINE b3Scalar b3Atan(b3Scalar x) { return atan(x); }
SIMD_FORCE_INLINE b3Scalar b3Atan2(b3Scalar x, b3Scalar y) { return atan2(x, y); }
SIMD_FORCE_INLINE b3Scalar b3Exp(b3Scalar x) { return exp(x); }
SIMD_FORCE_INLINE b3Scalar b3Log(b3Scalar x) { return log(x); }
SIMD_FORCE_INLINE b3Scalar b3Pow(b3Scalar x,b3Scalar y) { return pow(x,y); }
SIMD_FORCE_INLINE b3Scalar b3Fmod(b3Scalar x,b3Scalar y) { return fmod(x,y); }

#else

SIMD_FORCE_INLINE b3Scalar btSqrt(b3Scalar y)
SIMD_FORCE_INLINE b3Scalar b3Sqrt(b3Scalar y)
{
#ifdef USE_APPROXIMATION
double x, z, tempf;
@@ -375,30 +375,30 @@ SIMD_FORCE_INLINE b3Scalar btSqrt(b3Scalar y)
|
||||
return sqrtf(y);
|
||||
#endif
|
||||
}
|
||||
SIMD_FORCE_INLINE b3Scalar btFabs(b3Scalar x) { return fabsf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar btCos(b3Scalar x) { return cosf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar btSin(b3Scalar x) { return sinf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar btTan(b3Scalar x) { return tanf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar btAcos(b3Scalar x) {
|
||||
SIMD_FORCE_INLINE b3Scalar b3Fabs(b3Scalar x) { return fabsf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Cos(b3Scalar x) { return cosf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Sin(b3Scalar x) { return sinf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Tan(b3Scalar x) { return tanf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Acos(b3Scalar x) {
|
||||
if (x<b3Scalar(-1))
|
||||
x=b3Scalar(-1);
|
||||
if (x>b3Scalar(1))
|
||||
x=b3Scalar(1);
|
||||
return acosf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE b3Scalar btAsin(b3Scalar x) {
|
||||
SIMD_FORCE_INLINE b3Scalar b3Asin(b3Scalar x) {
|
||||
if (x<b3Scalar(-1))
|
||||
x=b3Scalar(-1);
|
||||
if (x>b3Scalar(1))
|
||||
x=b3Scalar(1);
|
||||
return asinf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE b3Scalar btAtan(b3Scalar x) { return atanf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar btAtan2(b3Scalar x, b3Scalar y) { return atan2f(x, y); }
|
||||
SIMD_FORCE_INLINE b3Scalar btExp(b3Scalar x) { return expf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar btLog(b3Scalar x) { return logf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar btPow(b3Scalar x,b3Scalar y) { return powf(x,y); }
|
||||
SIMD_FORCE_INLINE b3Scalar btFmod(b3Scalar x,b3Scalar y) { return fmodf(x,y); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Atan(b3Scalar x) { return atanf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Atan2(b3Scalar x, b3Scalar y) { return atan2f(x, y); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Exp(b3Scalar x) { return expf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Log(b3Scalar x) { return logf(x); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Pow(b3Scalar x,b3Scalar y) { return powf(x,y); }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Fmod(b3Scalar x,b3Scalar y) { return fmodf(x,y); }
|
||||
|
||||
#endif
|
||||
|
||||
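// Illustrative sketch (hypothetical helper, only the b3Acos defined above is taken from this
// header): the clamp to [-1,1] matters because the dot product of two unit vectors can drift
// slightly outside that range through rounding, and an unclamped acos() would then return NaN.
static b3Scalar angleBetweenUnitVectors(b3Scalar dotProduct)
{
	// dotProduct may be e.g. 1.0000001f after normalization error;
	// b3Acos clamps it to 1 and returns 0 instead of NaN.
	return b3Acos(dotProduct);
}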
@@ -409,10 +409,10 @@ SIMD_FORCE_INLINE b3Scalar btFmod(b3Scalar x,b3Scalar y) { return fmodf(x,y); }
#define SIMD_DEGS_PER_RAD (b3Scalar(360.0) / SIMD_2_PI)
#define SIMDSQRT12 b3Scalar(0.7071067811865475244008443621048490)

#define btRecipSqrt(x) ((b3Scalar)(b3Scalar(1.0)/btSqrt(b3Scalar(x)))) /* reciprocal square root */
#define b3RecipSqrt(x) ((b3Scalar)(b3Scalar(1.0)/b3Sqrt(b3Scalar(x)))) /* reciprocal square root */


#ifdef BT_USE_DOUBLE_PRECISION
#ifdef B3_USE_DOUBLE_PRECISION
#define SIMD_EPSILON DBL_EPSILON
#define SIMD_INFINITY DBL_MAX
#else
@@ -420,11 +420,11 @@ SIMD_FORCE_INLINE b3Scalar btFmod(b3Scalar x,b3Scalar y) { return fmodf(x,y); }
#define SIMD_INFINITY FLT_MAX
#endif

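// Illustrative sketch (hypothetical helper, only the b3RecipSqrt macro above is taken from this
// header): scaling a non-zero 3-component direction by the reciprocal square root of its squared
// length normalizes it with a single division.
static void normalize3(b3Scalar v[3])
{
	b3Scalar len2 = v[0]*v[0] + v[1]*v[1] + v[2]*v[2];
	b3Scalar invLen = b3RecipSqrt(len2);   // 1 / sqrt(len2); assumes len2 > 0
	v[0] *= invLen; v[1] *= invLen; v[2] *= invLen;
}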
SIMD_FORCE_INLINE b3Scalar btAtan2Fast(b3Scalar y, b3Scalar x)
SIMD_FORCE_INLINE b3Scalar b3Atan2Fast(b3Scalar y, b3Scalar x)
{
	b3Scalar coeff_1 = SIMD_PI / 4.0f;
	b3Scalar coeff_2 = 3.0f * coeff_1;
	b3Scalar abs_y = btFabs(y);
	b3Scalar abs_y = b3Fabs(y);
	b3Scalar angle;
	if (x >= 0.0f) {
		b3Scalar r = (x - abs_y) / (x + abs_y);
@@ -436,35 +436,35 @@ SIMD_FORCE_INLINE b3Scalar btAtan2Fast(b3Scalar y, b3Scalar x)
	return (y < 0.0f) ? -angle : angle;
}
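// Illustrative sketch (hypothetical helper): b3Atan2Fast trades accuracy for speed, so it is a
// drop-in replacement for b3Atan2 only where a small angular error is acceptable, not where
// exact angles matter. Only the two functions named above are taken from this header.
static void compareAtan2(b3Scalar y, b3Scalar x)
{
	b3Scalar exact  = b3Atan2(y, x);      // forwards to atan2/atan2f
	b3Scalar approx = b3Atan2Fast(y, x);  // polynomial approximation with the same quadrant handling
	b3Scalar error  = b3Fabs(exact - approx); // small fraction of a radian for typical inputs
	(void)error;
}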
SIMD_FORCE_INLINE bool btFuzzyZero(b3Scalar x) { return btFabs(x) < SIMD_EPSILON; }
|
||||
SIMD_FORCE_INLINE bool b3FuzzyZero(b3Scalar x) { return b3Fabs(x) < SIMD_EPSILON; }
|
||||
|
||||
SIMD_FORCE_INLINE bool btEqual(b3Scalar a, b3Scalar eps) {
|
||||
SIMD_FORCE_INLINE bool b3Equal(b3Scalar a, b3Scalar eps) {
|
||||
return (((a) <= eps) && !((a) < -eps));
|
||||
}
|
||||
SIMD_FORCE_INLINE bool btGreaterEqual (b3Scalar a, b3Scalar eps) {
|
||||
SIMD_FORCE_INLINE bool b3GreaterEqual (b3Scalar a, b3Scalar eps) {
|
||||
return (!((a) <= eps));
|
||||
}
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE int btIsNegative(b3Scalar x) {
|
||||
SIMD_FORCE_INLINE int b3IsNegative(b3Scalar x) {
|
||||
return x < b3Scalar(0.0) ? 1 : 0;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE b3Scalar btRadians(b3Scalar x) { return x * SIMD_RADS_PER_DEG; }
|
||||
SIMD_FORCE_INLINE b3Scalar btDegrees(b3Scalar x) { return x * SIMD_DEGS_PER_RAD; }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Radians(b3Scalar x) { return x * SIMD_RADS_PER_DEG; }
|
||||
SIMD_FORCE_INLINE b3Scalar b3Degrees(b3Scalar x) { return x * SIMD_DEGS_PER_RAD; }
|
||||
|
||||
#define BT_DECLARE_HANDLE(name) typedef struct name##__ { int unused; } *name
|
||||
#define B3_DECLARE_HANDLE(name) typedef struct name##__ { int unused; } *name
|
||||
|
||||
#ifndef btFsel
|
||||
SIMD_FORCE_INLINE b3Scalar btFsel(b3Scalar a, b3Scalar b, b3Scalar c)
|
||||
#ifndef b3Fsel
|
||||
SIMD_FORCE_INLINE b3Scalar b3Fsel(b3Scalar a, b3Scalar b, b3Scalar c)
|
||||
{
|
||||
return a >= 0 ? b : c;
|
||||
}
|
||||
#endif
|
||||
#define btFsels(a,b,c) (b3Scalar)btFsel(a,b,c)
|
||||
#define b3Fsels(a,b,c) (b3Scalar)b3Fsel(a,b,c)
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE bool btMachineIsLittleEndian()
|
||||
SIMD_FORCE_INLINE bool b3MachineIsLittleEndian()
|
||||
{
|
||||
long int i = 1;
|
||||
const char *p = (const char *) &i;
|
||||
@@ -476,9 +476,9 @@ SIMD_FORCE_INLINE bool btMachineIsLittleEndian()



///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360
///b3Select avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360
///Thanks Phil Knight. See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html
SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero)
SIMD_FORCE_INLINE unsigned b3Select(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero)
{
	// Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero
	// Rely on positive value or'ed with its negative having sign bit on
@@ -488,22 +488,22 @@ SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditio
	unsigned testEqz = ~testNz;
	return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
}
SIMD_FORCE_INLINE int btSelect(unsigned condition, int valueIfConditionNonZero, int valueIfConditionZero)
SIMD_FORCE_INLINE int b3Select(unsigned condition, int valueIfConditionNonZero, int valueIfConditionZero)
{
	unsigned testNz = (unsigned)(((int)condition | -(int)condition) >> 31);
	unsigned testEqz = ~testNz;
	return static_cast<int>((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
}
SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero)
SIMD_FORCE_INLINE float b3Select(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero)
{
#ifdef BT_HAVE_NATIVE_FSEL
	return (float)btFsel((b3Scalar)condition - b3Scalar(1.0f), valueIfConditionNonZero, valueIfConditionZero);
#ifdef B3_HAVE_NATIVE_FSEL
	return (float)b3Fsel((b3Scalar)condition - b3Scalar(1.0f), valueIfConditionNonZero, valueIfConditionZero);
#else
	return (condition != 0) ? valueIfConditionNonZero : valueIfConditionZero;
#endif
}
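// Illustrative sketch (hypothetical helper): with b3Select both operands are evaluated and the
// result is blended with bit masks, so there is no branch for the CPU to mispredict. The result
// is the same as the ternary form shown in the #else path above.
static int clampIndexBranchless(int i, int maxIndex)
{
	unsigned condition = (unsigned)(i > maxIndex);     // nonzero when i exceeds maxIndex
	return b3Select(condition, maxIndex, i);           // same result as (i > maxIndex) ? maxIndex : i
}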
template<typename T> SIMD_FORCE_INLINE void btSwap(T& a, T& b)
|
||||
template<typename T> SIMD_FORCE_INLINE void b3Swap(T& a, T& b)
|
||||
{
|
||||
T tmp = a;
|
||||
a = b;
|
||||
@@ -512,33 +512,33 @@ template<typename T> SIMD_FORCE_INLINE void btSwap(T& a, T& b)
|
||||
|
||||
|
||||
//PCK: endian swapping functions
|
||||
SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val)
|
||||
SIMD_FORCE_INLINE unsigned b3SwapEndian(unsigned val)
|
||||
{
|
||||
return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24));
|
||||
}
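// Illustrative sketch (hypothetical helper, using only the overload defined just above):
// b3SwapEndian reverses the byte order of a 32-bit value, e.g. 0x12345678 becomes 0x78563412,
// and swapping twice restores the original value.
static bool swapEndianRoundTrips(unsigned v)
{
	return b3SwapEndian(b3SwapEndian(v)) == v;
}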
|
||||
|
||||
SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val)
|
||||
SIMD_FORCE_INLINE unsigned short b3SwapEndian(unsigned short val)
|
||||
{
|
||||
return static_cast<unsigned short>(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8));
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE unsigned btSwapEndian(int val)
|
||||
SIMD_FORCE_INLINE unsigned b3SwapEndian(int val)
|
||||
{
|
||||
return btSwapEndian((unsigned)val);
|
||||
return b3SwapEndian((unsigned)val);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE unsigned short btSwapEndian(short val)
|
||||
SIMD_FORCE_INLINE unsigned short b3SwapEndian(short val)
|
||||
{
|
||||
return btSwapEndian((unsigned short) val);
|
||||
return b3SwapEndian((unsigned short) val);
|
||||
}
|
||||
|
||||
///btSwapFloat uses using char pointers to swap the endianness
|
||||
////btSwapFloat/btSwapDouble will NOT return a float, because the machine might 'correct' invalid floating point values
|
||||
///b3SwapFloat uses using char pointers to swap the endianness
|
||||
////b3SwapFloat/b3SwapDouble will NOT return a float, because the machine might 'correct' invalid floating point values
|
||||
///Not all values of sign/exponent/mantissa are valid floating point numbers according to IEEE 754.
|
||||
///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception.
|
||||
///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you.
|
||||
///so instead of returning a float/double, we return integer/long long integer
|
||||
SIMD_FORCE_INLINE unsigned int btSwapEndianFloat(float d)
|
||||
SIMD_FORCE_INLINE unsigned int b3SwapEndianFloat(float d)
|
||||
{
|
||||
unsigned int a = 0;
|
||||
unsigned char *dst = (unsigned char *)&a;
|
||||
@@ -552,7 +552,7 @@ SIMD_FORCE_INLINE unsigned int btSwapEndianFloat(float d)
|
||||
}
|
||||
|
||||
// unswap using char pointers
|
||||
SIMD_FORCE_INLINE float btUnswapEndianFloat(unsigned int a)
|
||||
SIMD_FORCE_INLINE float b3UnswapEndianFloat(unsigned int a)
|
||||
{
|
||||
float d = 0.0f;
|
||||
unsigned char *src = (unsigned char *)&a;
|
||||
@@ -568,7 +568,7 @@ SIMD_FORCE_INLINE float btUnswapEndianFloat(unsigned int a)
|
||||
|
||||
|
||||
// swap using char pointers
|
||||
SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst)
|
||||
SIMD_FORCE_INLINE void b3SwapEndianDouble(double d, unsigned char* dst)
|
||||
{
|
||||
unsigned char *src = (unsigned char *)&d;
|
||||
|
||||
@@ -584,7 +584,7 @@ SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst)
|
||||
}
|
||||
|
||||
// unswap using char pointers
|
||||
SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char *src)
|
||||
SIMD_FORCE_INLINE double b3UnswapEndianDouble(const unsigned char *src)
|
||||
{
|
||||
double d = 0.0;
|
||||
unsigned char *dst = (unsigned char *)&d;
|
||||
@@ -602,9 +602,9 @@ SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char *src)
|
||||
}
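// Illustrative sketch (hypothetical helper): as the comment above explains, the swapped
// representation is kept in an unsigned int rather than a float so the FPU never sees a
// potentially invalid bit pattern; unswapping restores the original value.
static bool floatEndianRoundTrips(float f)
{
	unsigned int swapped = b3SwapEndianFloat(f);   // byte-reversed bits, stored as an integer
	float back = b3UnswapEndianFloat(swapped);     // reverse again and reinterpret as float
	return back == f;
}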
// returns normalized value in range [-SIMD_PI, SIMD_PI]
SIMD_FORCE_INLINE b3Scalar btNormalizeAngle(b3Scalar angleInRadians)
SIMD_FORCE_INLINE b3Scalar b3NormalizeAngle(b3Scalar angleInRadians)
{
	angleInRadians = btFmod(angleInRadians, SIMD_2_PI);
	angleInRadians = b3Fmod(angleInRadians, SIMD_2_PI);
	if(angleInRadians < -SIMD_PI)
	{
		return angleInRadians + SIMD_2_PI;
@@ -620,9 +620,9 @@ SIMD_FORCE_INLINE b3Scalar btNormalizeAngle(b3Scalar angleInRadians)
}
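// Illustrative sketch (hypothetical helper, using only b3NormalizeAngle as declared above):
// keeping an accumulated heading inside [-SIMD_PI, SIMD_PI] avoids the precision loss that
// comes from letting the raw angle grow without bound.
static b3Scalar advanceHeading(b3Scalar heading, b3Scalar deltaAngle)
{
	// e.g. a heading just below SIMD_PI plus a positive delta wraps to a value near -SIMD_PI
	return b3NormalizeAngle(heading + deltaAngle);
}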
|
||||
///rudimentary class to provide type info
|
||||
struct btTypedObject
|
||||
struct b3TypedObject
|
||||
{
|
||||
btTypedObject(int objectType)
|
||||
b3TypedObject(int objectType)
|
||||
:m_objectType(objectType)
|
||||
{
|
||||
}
|
||||
@@ -636,10 +636,10 @@ struct btTypedObject
|
||||
|
||||
|
||||
///align a pointer to the provided alignment, upwards
template <typename T>T* btAlignPointer(T* unalignedPtr, size_t alignment)
template <typename T>T* b3AlignPointer(T* unalignedPtr, size_t alignment)
{

	struct btConvertPointerSizeT
	struct b3ConvertPointerSizeT
	{
		union
		{
@@ -647,7 +647,7 @@ template <typename T>T* btAlignPointer(T* unalignedPtr, size_t alignment)
			size_t integer;
		};
	};
	btConvertPointerSizeT converter;
	b3ConvertPointerSizeT converter;


	const size_t bit_mask = ~(alignment - 1);
@@ -657,4 +657,4 @@ template <typename T>T* btAlignPointer(T* unalignedPtr, size_t alignment)
	return converter.ptr;
}
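// Illustrative sketch (hypothetical helper, using only the b3AlignPointer template above):
// the caller is expected to over-allocate the buffer by (alignment - 1) bytes so that an
// aligned address is guaranteed to exist inside it.
static unsigned char* alignedCursor(unsigned char* rawBuffer)
{
	// round the pointer up to the next 16-byte boundary
	return b3AlignPointer(rawBuffer, 16);
}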
|
||||
#endif //BT_SCALAR_H
|
||||
#endif //B3_SCALAR_H
|
||||
|
||||
@@ -17,16 +17,16 @@ StackAlloc extracted from GJK-EPA collision solver by Nathanael Presson
|
||||
Nov.2006
|
||||
*/
|
||||
|
||||
#ifndef BT_STACK_ALLOC
|
||||
#define BT_STACK_ALLOC
|
||||
#ifndef B3_STACK_ALLOC
|
||||
#define B3_STACK_ALLOC
|
||||
|
||||
#include "b3Scalar.h" //for btAssert
|
||||
#include "b3Scalar.h" //for b3Assert
|
||||
#include "b3AlignedAllocator.h"
|
||||
|
||||
///The btBlock class is an internal structure for the b3StackAlloc memory allocator.
struct btBlock
///The b3Block class is an internal structure for the b3StackAlloc memory allocator.
struct b3Block
{
	btBlock* previous;
	b3Block* previous;
	unsigned char* address;
};
|
||||
@@ -41,18 +41,18 @@ public:
|
||||
inline void create(unsigned int size)
|
||||
{
|
||||
destroy();
|
||||
data = (unsigned char*) btAlignedAlloc(size,16);
|
||||
data = (unsigned char*) b3AlignedAlloc(size,16);
|
||||
totalsize = size;
|
||||
}
|
||||
inline void destroy()
|
||||
{
|
||||
btAssert(usedsize==0);
|
||||
b3Assert(usedsize==0);
|
||||
//Raise(L"StackAlloc is still in use");
|
||||
|
||||
if(usedsize==0)
|
||||
{
|
||||
if(!ischild && data)
|
||||
btAlignedFree(data);
|
||||
b3AlignedFree(data);
|
||||
|
||||
data = 0;
|
||||
usedsize = 0;
|
||||
@@ -73,27 +73,27 @@ public:
|
||||
usedsize=nus;
|
||||
return(data+(usedsize-size));
|
||||
}
|
||||
btAssert(0);
|
||||
b3Assert(0);
|
||||
//&& (L"Not enough memory"));
|
||||
|
||||
return(0);
|
||||
}
|
||||
SIMD_FORCE_INLINE btBlock* beginBlock()
|
||||
SIMD_FORCE_INLINE b3Block* beginBlock()
|
||||
{
|
||||
btBlock* pb = (btBlock*)allocate(sizeof(btBlock));
|
||||
b3Block* pb = (b3Block*)allocate(sizeof(b3Block));
|
||||
pb->previous = current;
|
||||
pb->address = data+usedsize;
|
||||
current = pb;
|
||||
return(pb);
|
||||
}
|
||||
SIMD_FORCE_INLINE void endBlock(btBlock* block)
|
||||
SIMD_FORCE_INLINE void endBlock(b3Block* block)
|
||||
{
|
||||
btAssert(block==current);
|
||||
b3Assert(block==current);
|
||||
//Raise(L"Unmatched blocks");
|
||||
if(block==current)
|
||||
{
|
||||
current = block->previous;
|
||||
usedsize = (unsigned int)((block->address-data)-sizeof(btBlock));
|
||||
usedsize = (unsigned int)((block->address-data)-sizeof(b3Block));
|
||||
}
|
||||
}
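// Illustrative sketch (hypothetical usage, assuming a b3StackAlloc instance is available;
// only create, beginBlock, allocate, endBlock and destroy are taken from the members shown
// above): allocations are released in LIFO order by closing the enclosing block.
static void stackAllocExample(b3StackAlloc& stack)
{
	stack.create(1024 * 1024);           // reserve the backing buffer once
	b3Block* block = stack.beginBlock(); // mark the current top of the stack
	void* scratch = stack.allocate(256); // bump-allocate scratch memory from the buffer
	(void)scratch;
	stack.endBlock(block);               // releases everything allocated since beginBlock
	stack.destroy();                     // asserts that usedsize is back to zero
}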
|
||||
|
||||
@@ -109,8 +109,8 @@ private:
|
||||
unsigned char* data;
|
||||
unsigned int totalsize;
|
||||
unsigned int usedsize;
|
||||
btBlock* current;
|
||||
b3Block* current;
|
||||
bool ischild;
|
||||
};
|
||||
|
||||
#endif //BT_STACK_ALLOC
|
||||
#endif //B3_STACK_ALLOC
|
||||
|
||||
@@ -14,16 +14,16 @@ subject to the following restrictions:
|
||||
|
||||
|
||||
|
||||
#ifndef BT_TRANSFORM_H
|
||||
#define BT_TRANSFORM_H
|
||||
#ifndef B3_TRANSFORM_H
|
||||
#define B3_TRANSFORM_H
|
||||
|
||||
|
||||
#include "b3Matrix3x3.h"
|
||||
|
||||
#ifdef BT_USE_DOUBLE_PRECISION
|
||||
#define btTransformData btTransformDoubleData
|
||||
#ifdef B3_USE_DOUBLE_PRECISION
|
||||
#define b3TransformData b3TransformDoubleData
|
||||
#else
|
||||
#define btTransformData btTransformFloatData
|
||||
#define b3TransformData b3TransformFloatData
|
||||
#endif
|
||||
|
||||
|
||||
@@ -85,7 +85,7 @@ public:
|
||||
|
||||
/* void multInverseLeft(const b3Transform& t1, const b3Transform& t2) {
|
||||
b3Vector3 v = t2.m_origin - t1.m_origin;
|
||||
m_basis = btMultTransposeLeft(t1.m_basis, t2.m_basis);
|
||||
m_basis = b3MultTransposeLeft(t1.m_basis, t2.m_basis);
|
||||
m_origin = v * t1.m_basis;
|
||||
}
|
||||
*/
|
||||
@@ -206,15 +206,15 @@ public:
|
||||
return identityTransform;
|
||||
}
|
||||
|
||||
void serialize(struct btTransformData& dataOut) const;
|
||||
void serialize(struct b3TransformData& dataOut) const;
|
||||
|
||||
void serializeFloat(struct btTransformFloatData& dataOut) const;
|
||||
void serializeFloat(struct b3TransformFloatData& dataOut) const;
|
||||
|
||||
void deSerialize(const struct btTransformData& dataIn);
|
||||
void deSerialize(const struct b3TransformData& dataIn);
|
||||
|
||||
void deSerializeDouble(const struct btTransformDoubleData& dataIn);
|
||||
void deSerializeDouble(const struct b3TransformDoubleData& dataIn);
|
||||
|
||||
void deSerializeFloat(const struct btTransformFloatData& dataIn);
|
||||
void deSerializeFloat(const struct b3TransformFloatData& dataIn);
|
||||
|
||||
};
|
||||
|
||||
@@ -250,53 +250,53 @@ SIMD_FORCE_INLINE bool operator==(const b3Transform& t1, const b3Transform& t2)
|
||||
|
||||
|
||||
///for serialization
|
||||
struct btTransformFloatData
|
||||
struct b3TransformFloatData
|
||||
{
|
||||
btMatrix3x3FloatData m_basis;
|
||||
btVector3FloatData m_origin;
|
||||
b3Matrix3x3FloatData m_basis;
|
||||
b3Vector3FloatData m_origin;
|
||||
};
|
||||
|
||||
struct btTransformDoubleData
|
||||
struct b3TransformDoubleData
|
||||
{
|
||||
btMatrix3x3DoubleData m_basis;
|
||||
btVector3DoubleData m_origin;
|
||||
b3Matrix3x3DoubleData m_basis;
|
||||
b3Vector3DoubleData m_origin;
|
||||
};
|
||||
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE void b3Transform::serialize(btTransformData& dataOut) const
|
||||
SIMD_FORCE_INLINE void b3Transform::serialize(b3TransformData& dataOut) const
|
||||
{
|
||||
m_basis.serialize(dataOut.m_basis);
|
||||
m_origin.serialize(dataOut.m_origin);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void b3Transform::serializeFloat(btTransformFloatData& dataOut) const
|
||||
SIMD_FORCE_INLINE void b3Transform::serializeFloat(b3TransformFloatData& dataOut) const
|
||||
{
|
||||
m_basis.serializeFloat(dataOut.m_basis);
|
||||
m_origin.serializeFloat(dataOut.m_origin);
|
||||
}
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE void b3Transform::deSerialize(const btTransformData& dataIn)
|
||||
SIMD_FORCE_INLINE void b3Transform::deSerialize(const b3TransformData& dataIn)
|
||||
{
|
||||
m_basis.deSerialize(dataIn.m_basis);
|
||||
m_origin.deSerialize(dataIn.m_origin);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void b3Transform::deSerializeFloat(const btTransformFloatData& dataIn)
|
||||
SIMD_FORCE_INLINE void b3Transform::deSerializeFloat(const b3TransformFloatData& dataIn)
|
||||
{
|
||||
m_basis.deSerializeFloat(dataIn.m_basis);
|
||||
m_origin.deSerializeFloat(dataIn.m_origin);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void b3Transform::deSerializeDouble(const btTransformDoubleData& dataIn)
|
||||
SIMD_FORCE_INLINE void b3Transform::deSerializeDouble(const b3TransformDoubleData& dataIn)
|
||||
{
|
||||
m_basis.deSerializeDouble(dataIn.m_basis);
|
||||
m_origin.deSerializeDouble(dataIn.m_origin);
|
||||
}
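// Illustrative sketch (hypothetical helper; b3Transform construction is assumed here, only the
// serializeFloat/deSerializeFloat members above are taken from this header): b3TransformFloatData
// is a plain struct, so a transform can be flattened into it and rebuilt from it.
static b3Transform copyViaFloatData(const b3Transform& in)
{
	b3TransformFloatData data;
	in.serializeFloat(data);        // basis and origin are written into the POD struct
	b3Transform out;
	out.deSerializeFloat(data);     // rebuild an equivalent transform from the POD struct
	return out;
}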
|
||||
|
||||
|
||||
#endif //BT_TRANSFORM_H
|
||||
#endif //B3_TRANSFORM_H
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -13,8 +13,8 @@ subject to the following restrictions:
|
||||
*/
|
||||
|
||||
|
||||
#ifndef BT_TRANSFORM_UTIL_H
|
||||
#define BT_TRANSFORM_UTIL_H
|
||||
#ifndef B3_TRANSFORM_UTIL_H
|
||||
#define B3_TRANSFORM_UTIL_H
|
||||
|
||||
#include "b3Transform.h"
|
||||
#define ANGULAR_MOTION_THRESHOLD b3Scalar(0.5)*SIMD_HALF_PI
|
||||
@@ -22,7 +22,7 @@ subject to the following restrictions:
|
||||
|
||||
|
||||
|
||||
SIMD_FORCE_INLINE b3Vector3 btAabbSupport(const b3Vector3& halfExtents,const b3Vector3& supportDir)
|
||||
SIMD_FORCE_INLINE b3Vector3 b3AabbSupport(const b3Vector3& halfExtents,const b3Vector3& supportDir)
|
||||
{
|
||||
return b3Vector3(supportDir.getX() < b3Scalar(0.0) ? -halfExtents.getX() : halfExtents.getX(),
|
||||
supportDir.getY() < b3Scalar(0.0) ? -halfExtents.getY() : halfExtents.getY(),
|
||||
@@ -68,9 +68,9 @@ public:
|
||||
else
|
||||
{
|
||||
// sync(fAngle) = sin(c*fAngle)/t
|
||||
axis = angvel*( btSin(b3Scalar(0.5)*fAngle*timeStep)/fAngle );
|
||||
axis = angvel*( b3Sin(b3Scalar(0.5)*fAngle*timeStep)/fAngle );
|
||||
}
|
||||
b3Quaternion dorn (axis.getX(),axis.getY(),axis.getZ(),btCos( fAngle*timeStep*b3Scalar(0.5) ));
|
||||
b3Quaternion dorn (axis.getX(),axis.getY(),axis.getZ(),b3Cos( fAngle*timeStep*b3Scalar(0.5) ));
|
||||
b3Quaternion orn0 = curTrans.getRotation();
|
||||
|
||||
b3Quaternion predictedOrn = dorn * orn0;
|
||||
@@ -106,7 +106,7 @@ public:
|
||||
if (len < SIMD_EPSILON*SIMD_EPSILON)
|
||||
axis = b3Vector3(b3Scalar(1.),b3Scalar(0.),b3Scalar(0.));
|
||||
else
|
||||
axis /= btSqrt(len);
|
||||
axis /= b3Sqrt(len);
|
||||
}
|
||||
|
||||
static void calculateVelocity(const b3Transform& transform0,const b3Transform& transform1,b3Scalar timeStep,b3Vector3& linVel,b3Vector3& angVel)
|
||||
@@ -135,15 +135,15 @@ public:
|
||||
if (len < SIMD_EPSILON*SIMD_EPSILON)
|
||||
axis = b3Vector3(b3Scalar(1.),b3Scalar(0.),b3Scalar(0.));
|
||||
else
|
||||
axis /= btSqrt(len);
|
||||
axis /= b3Sqrt(len);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
///The btConvexSeparatingDistanceUtil can help speed up convex collision detection
///The b3ConvexSeparatingDistanceUtil can help speed up convex collision detection
///by conservatively updating a cached separating distance/vector instead of re-calculating the closest distance
class btConvexSeparatingDistanceUtil
class b3ConvexSeparatingDistanceUtil
{
b3Quaternion m_ornA;
|
||||
b3Quaternion m_ornB;
|
||||
@@ -158,7 +158,7 @@ class btConvexSeparatingDistanceUtil
|
||||
|
||||
public:
|
||||
|
||||
btConvexSeparatingDistanceUtil(b3Scalar boundingRadiusA,b3Scalar boundingRadiusB)
|
||||
b3ConvexSeparatingDistanceUtil(b3Scalar boundingRadiusA,b3Scalar boundingRadiusB)
|
||||
:m_boundingRadiusA(boundingRadiusA),
|
||||
m_boundingRadiusB(boundingRadiusB),
|
||||
m_separatingDistance(0.f)
|
||||
@@ -224,5 +224,5 @@ public:
|
||||
};
|
||||
|
||||
|
||||
#endif //BT_TRANSFORM_UTIL_H
|
||||
#endif //B3_TRANSFORM_UTIL_H
|
||||
|
||||
|
||||
@@ -16,12 +16,12 @@
|
||||
*/
|
||||
|
||||
#if defined (_WIN32) || defined (__i386__)
|
||||
#define BT_USE_SSE_IN_API
|
||||
#define B3_USE_SSE_IN_API
|
||||
#endif
|
||||
|
||||
#include "b3Vector3.h"
|
||||
|
||||
#if defined (BT_USE_SSE) || defined (BT_USE_NEON)
|
||||
#if defined (B3_USE_SSE) || defined (B3_USE_NEON)
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include <stdint.h>
|
||||
@@ -32,7 +32,7 @@ typedef float float4 __attribute__ ((vector_size(16)));
|
||||
//typedef uint32_t uint4 __attribute__ ((vector_size(16)));
|
||||
|
||||
|
||||
#if defined BT_USE_SSE || defined _WIN32
|
||||
#if defined B3_USE_SSE || defined _WIN32
|
||||
|
||||
#define LOG2_ARRAY_SIZE 6
|
||||
#define STACK_ARRAY_COUNT (1UL << LOG2_ARRAY_SIZE)
|
||||
@@ -44,9 +44,9 @@ long _maxdot_large( const float *vv, const float *vec, unsigned long count, floa
|
||||
{
|
||||
const float4 *vertices = (const float4*) vv;
|
||||
static const unsigned char indexTable[16] = {-1, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
|
||||
float4 dotMax = btAssign128( -BT_INFINITY, -BT_INFINITY, -BT_INFINITY, -BT_INFINITY );
|
||||
float4 dotMax = b3Assign128( -B3_INFINITY, -B3_INFINITY, -B3_INFINITY, -B3_INFINITY );
|
||||
float4 vvec = _mm_loadu_ps( vec );
|
||||
float4 vHi = btCastiTo128f(_mm_shuffle_epi32( btCastfTo128i( vvec), 0xaa )); /// zzzz
|
||||
float4 vHi = b3CastiTo128f(_mm_shuffle_epi32( b3CastfTo128i( vvec), 0xaa )); /// zzzz
|
||||
float4 vLo = _mm_movelh_ps( vvec, vvec ); /// xyxy
|
||||
|
||||
long maxIndex = -1L;
|
||||
@@ -180,7 +180,7 @@ long _maxdot_large( const float *vv, const float *vec, unsigned long count, floa
|
||||
index = 0;
|
||||
|
||||
|
||||
if( btUnlikely( count > 16) )
|
||||
if( b3Unlikely( count > 16) )
|
||||
{
|
||||
for( ; index + 4 <= count / 4; index+=4 )
|
||||
{ // do four dot products at a time. Carefully avoid touching the w element.
|
||||
@@ -429,9 +429,9 @@ long _mindot_large( const float *vv, const float *vec, unsigned long count, floa
|
||||
{
|
||||
const float4 *vertices = (const float4*) vv;
|
||||
static const unsigned char indexTable[16] = {-1, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
|
||||
float4 dotmin = btAssign128( BT_INFINITY, BT_INFINITY, BT_INFINITY, BT_INFINITY );
|
||||
float4 dotmin = b3Assign128( B3_INFINITY, B3_INFINITY, B3_INFINITY, B3_INFINITY );
|
||||
float4 vvec = _mm_loadu_ps( vec );
|
||||
float4 vHi = btCastiTo128f(_mm_shuffle_epi32( btCastfTo128i( vvec), 0xaa )); /// zzzz
|
||||
float4 vHi = b3CastiTo128f(_mm_shuffle_epi32( b3CastfTo128i( vvec), 0xaa )); /// zzzz
|
||||
float4 vLo = _mm_movelh_ps( vvec, vvec ); /// xyxy
|
||||
|
||||
long minIndex = -1L;
|
||||
@@ -565,7 +565,7 @@ long _mindot_large( const float *vv, const float *vec, unsigned long count, floa
|
||||
index = 0;
|
||||
|
||||
|
||||
if(btUnlikely( count > 16) )
|
||||
if(b3Unlikely( count > 16) )
|
||||
{
|
||||
for( ; index + 4 <= count / 4; index+=4 )
|
||||
{ // do four dot products at a time. Carefully avoid touching the w element.
|
||||
@@ -812,7 +812,7 @@ long _mindot_large( const float *vv, const float *vec, unsigned long count, floa
|
||||
}
|
||||
|
||||
|
||||
#elif defined BT_USE_NEON
|
||||
#elif defined B3_USE_NEON
|
||||
#define ARM_NEON_GCC_COMPATIBILITY 1
|
||||
#include <arm_neon.h>
|
||||
|
||||
@@ -860,8 +860,8 @@ long _maxdot_large_v0( const float *vv, const float *vec, unsigned long count, f
|
||||
float32x4_t vvec = vld1q_f32_aligned_postincrement( vec );
|
||||
float32x2_t vLo = vget_low_f32(vvec);
|
||||
float32x2_t vHi = vdup_lane_f32(vget_high_f32(vvec), 0);
|
||||
float32x2_t dotMaxLo = (float32x2_t) { -BT_INFINITY, -BT_INFINITY };
|
||||
float32x2_t dotMaxHi = (float32x2_t) { -BT_INFINITY, -BT_INFINITY };
|
||||
float32x2_t dotMaxLo = (float32x2_t) { -B3_INFINITY, -B3_INFINITY };
|
||||
float32x2_t dotMaxHi = (float32x2_t) { -B3_INFINITY, -B3_INFINITY };
|
||||
uint32x2_t indexLo = (uint32x2_t) {0, 1};
|
||||
uint32x2_t indexHi = (uint32x2_t) {2, 3};
|
||||
uint32x2_t iLo = (uint32x2_t) {-1, -1};
|
||||
@@ -1052,7 +1052,7 @@ long _maxdot_large_v1( const float *vv, const float *vec, unsigned long count, f
|
||||
const uint32x4_t four = (uint32x4_t){ 4, 4, 4, 4 };
|
||||
uint32x4_t local_index = (uint32x4_t) {0, 1, 2, 3};
|
||||
uint32x4_t index = (uint32x4_t) { -1, -1, -1, -1 };
|
||||
float32x4_t maxDot = (float32x4_t) { -BT_INFINITY, -BT_INFINITY, -BT_INFINITY, -BT_INFINITY };
|
||||
float32x4_t maxDot = (float32x4_t) { -B3_INFINITY, -B3_INFINITY, -B3_INFINITY, -B3_INFINITY };
|
||||
|
||||
unsigned long i = 0;
|
||||
for( ; i + 8 <= count; i += 8 )
|
||||
@@ -1245,8 +1245,8 @@ long _mindot_large_v0( const float *vv, const float *vec, unsigned long count, f
|
||||
float32x4_t vvec = vld1q_f32_aligned_postincrement( vec );
|
||||
float32x2_t vLo = vget_low_f32(vvec);
|
||||
float32x2_t vHi = vdup_lane_f32(vget_high_f32(vvec), 0);
|
||||
float32x2_t dotMinLo = (float32x2_t) { BT_INFINITY, BT_INFINITY };
|
||||
float32x2_t dotMinHi = (float32x2_t) { BT_INFINITY, BT_INFINITY };
|
||||
float32x2_t dotMinLo = (float32x2_t) { B3_INFINITY, B3_INFINITY };
|
||||
float32x2_t dotMinHi = (float32x2_t) { B3_INFINITY, B3_INFINITY };
|
||||
uint32x2_t indexLo = (uint32x2_t) {0, 1};
|
||||
uint32x2_t indexHi = (uint32x2_t) {2, 3};
|
||||
uint32x2_t iLo = (uint32x2_t) {-1, -1};
|
||||
@@ -1435,7 +1435,7 @@ long _mindot_large_v1( const float *vv, const float *vec, unsigned long count, f
|
||||
const uint32x4_t four = (uint32x4_t){ 4, 4, 4, 4 };
|
||||
uint32x4_t local_index = (uint32x4_t) {0, 1, 2, 3};
|
||||
uint32x4_t index = (uint32x4_t) { -1, -1, -1, -1 };
|
||||
float32x4_t minDot = (float32x4_t) { BT_INFINITY, BT_INFINITY, BT_INFINITY, BT_INFINITY };
|
||||
float32x4_t minDot = (float32x4_t) { B3_INFINITY, B3_INFINITY, B3_INFINITY, B3_INFINITY };
|
||||
|
||||
unsigned long i = 0;
|
||||
for( ; i + 8 <= count; i += 8 )
|
||||
|
||||
@@ -14,23 +14,23 @@ subject to the following restrictions:
|
||||
|
||||
|
||||
|
||||
#ifndef BT_VECTOR3_H
|
||||
#define BT_VECTOR3_H
|
||||
#ifndef B3_VECTOR3_H
|
||||
#define B3_VECTOR3_H
|
||||
|
||||
//#include <stdint.h>
|
||||
#include "b3Scalar.h"
|
||||
#include "b3MinMax.h"
|
||||
#include "b3AlignedAllocator.h"
|
||||
|
||||
#ifdef BT_USE_DOUBLE_PRECISION
|
||||
#define btVector3Data btVector3DoubleData
|
||||
#define btVector3DataName "btVector3DoubleData"
|
||||
#ifdef B3_USE_DOUBLE_PRECISION
|
||||
#define b3Vector3Data b3Vector3DoubleData
|
||||
#define b3Vector3DataName "b3Vector3DoubleData"
|
||||
#else
|
||||
#define btVector3Data btVector3FloatData
|
||||
#define btVector3DataName "btVector3FloatData"
|
||||
#endif //BT_USE_DOUBLE_PRECISION
|
||||
#define b3Vector3Data b3Vector3FloatData
|
||||
#define b3Vector3DataName "b3Vector3FloatData"
|
||||
#endif //B3_USE_DOUBLE_PRECISION
|
||||
|
||||
#if defined BT_USE_SSE
|
||||
#if defined B3_USE_SSE
|
||||
|
||||
//typedef uint32_t __m128i __attribute__ ((vector_size(16)));
|
||||
|
||||
@@ -39,35 +39,35 @@ subject to the following restrictions:
|
||||
#endif
|
||||
|
||||
|
||||
#define BT_SHUFFLE(x,y,z,w) ((w)<<6 | (z)<<4 | (y)<<2 | (x))
|
||||
//#define bt_pshufd_ps( _a, _mask ) (__m128) _mm_shuffle_epi32((__m128i)(_a), (_mask) )
|
||||
#define bt_pshufd_ps( _a, _mask ) _mm_shuffle_ps((_a), (_a), (_mask) )
|
||||
#define bt_splat3_ps( _a, _i ) bt_pshufd_ps((_a), BT_SHUFFLE(_i,_i,_i, 3) )
|
||||
#define bt_splat_ps( _a, _i ) bt_pshufd_ps((_a), BT_SHUFFLE(_i,_i,_i,_i) )
|
||||
#define B3_SHUFFLE(x,y,z,w) ((w)<<6 | (z)<<4 | (y)<<2 | (x))
|
||||
//#define b3_pshufd_ps( _a, _mask ) (__m128) _mm_shuffle_epi32((__m128i)(_a), (_mask) )
|
||||
#define b3_pshufd_ps( _a, _mask ) _mm_shuffle_ps((_a), (_a), (_mask) )
|
||||
#define b3_splat3_ps( _a, _i ) b3_pshufd_ps((_a), B3_SHUFFLE(_i,_i,_i, 3) )
|
||||
#define b3_splat_ps( _a, _i ) b3_pshufd_ps((_a), B3_SHUFFLE(_i,_i,_i,_i) )
|
||||
|
||||
#define btv3AbsiMask (_mm_set_epi32(0x00000000, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
|
||||
#define btvAbsMask (_mm_set_epi32( 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
|
||||
#define btvFFF0Mask (_mm_set_epi32(0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
|
||||
#define btv3AbsfMask btCastiTo128f(btv3AbsiMask)
|
||||
#define btvFFF0fMask btCastiTo128f(btvFFF0Mask)
|
||||
#define btvxyzMaskf btvFFF0fMask
|
||||
#define btvAbsfMask btCastiTo128f(btvAbsMask)
|
||||
#define b3v3AbsiMask (_mm_set_epi32(0x00000000, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
|
||||
#define b3vAbsMask (_mm_set_epi32( 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
|
||||
#define b3vFFF0Mask (_mm_set_epi32(0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
|
||||
#define b3v3AbsfMask b3CastiTo128f(b3v3AbsiMask)
|
||||
#define b3vFFF0fMask b3CastiTo128f(b3vFFF0Mask)
|
||||
#define b3vxyzMaskf b3vFFF0fMask
|
||||
#define b3vAbsfMask b3CastiTo128f(b3vAbsMask)
|
||||
|
||||
|
||||
|
||||
const __m128 ATTRIBUTE_ALIGNED16(btvMzeroMask) = {-0.0f, -0.0f, -0.0f, -0.0f};
|
||||
const __m128 ATTRIBUTE_ALIGNED16(b3vMzeroMask) = {-0.0f, -0.0f, -0.0f, -0.0f};
|
||||
const __m128 ATTRIBUTE_ALIGNED16(v1110) = {1.0f, 1.0f, 1.0f, 0.0f};
|
||||
const __m128 ATTRIBUTE_ALIGNED16(vHalf) = {0.5f, 0.5f, 0.5f, 0.5f};
|
||||
const __m128 ATTRIBUTE_ALIGNED16(v1_5) = {1.5f, 1.5f, 1.5f, 1.5f};
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef BT_USE_NEON
|
||||
#ifdef B3_USE_NEON
|
||||
|
||||
const float32x4_t ATTRIBUTE_ALIGNED16(btvMzeroMask) = (float32x4_t){-0.0f, -0.0f, -0.0f, -0.0f};
|
||||
const int32x4_t ATTRIBUTE_ALIGNED16(btvFFF0Mask) = (int32x4_t){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
|
||||
const int32x4_t ATTRIBUTE_ALIGNED16(btvAbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
|
||||
const int32x4_t ATTRIBUTE_ALIGNED16(btv3AbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x0};
|
||||
const float32x4_t ATTRIBUTE_ALIGNED16(b3vMzeroMask) = (float32x4_t){-0.0f, -0.0f, -0.0f, -0.0f};
|
||||
const int32x4_t ATTRIBUTE_ALIGNED16(b3vFFF0Mask) = (int32x4_t){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
|
||||
const int32x4_t ATTRIBUTE_ALIGNED16(b3vAbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
|
||||
const int32x4_t ATTRIBUTE_ALIGNED16(b3v3AbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x0};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -79,7 +79,7 @@ ATTRIBUTE_ALIGNED16(class) b3Vector3
|
||||
{
|
||||
public:
|
||||
|
||||
BT_DECLARE_ALIGNED_ALLOCATOR();
|
||||
B3_DECLARE_ALIGNED_ALLOCATOR();
|
||||
|
||||
#if defined (__SPU__) && defined (__CELLOS_LV2__)
|
||||
b3Scalar m_floats[4];
|
||||
@@ -90,18 +90,18 @@ public:
|
||||
}
|
||||
public:
|
||||
#else //__CELLOS_LV2__ __SPU__
|
||||
#if defined (BT_USE_SSE) || defined(BT_USE_NEON) // _WIN32 || ARM
|
||||
#if defined (B3_USE_SSE) || defined(B3_USE_NEON) // _WIN32 || ARM
|
||||
union {
|
||||
btSimdFloat4 mVec128;
|
||||
b3SimdFloat4 mVec128;
|
||||
b3Scalar m_floats[4];
|
||||
struct {b3Scalar x,y,z,w;};
|
||||
|
||||
};
|
||||
SIMD_FORCE_INLINE btSimdFloat4 get128() const
|
||||
SIMD_FORCE_INLINE b3SimdFloat4 get128() const
|
||||
{
|
||||
return mVec128;
|
||||
}
|
||||
SIMD_FORCE_INLINE void set128(btSimdFloat4 v128)
|
||||
SIMD_FORCE_INLINE void set128(b3SimdFloat4 v128)
|
||||
{
|
||||
mVec128 = v128;
|
||||
}
|
||||
@@ -133,9 +133,9 @@ public:
|
||||
m_floats[3] = b3Scalar(0.f);
|
||||
}
|
||||
|
||||
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE) )|| defined (BT_USE_NEON)
|
||||
#if (defined (B3_USE_SSE_IN_API) && defined (B3_USE_SSE) )|| defined (B3_USE_NEON)
|
||||
// Set Vector
|
||||
SIMD_FORCE_INLINE b3Vector3( btSimdFloat4 v)
|
||||
SIMD_FORCE_INLINE b3Vector3( b3SimdFloat4 v)
|
||||
{
|
||||
mVec128 = v;
|
||||
}
|
||||
@@ -154,15 +154,15 @@ public:
|
||||
|
||||
return *this;
|
||||
}
|
||||
#endif // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)
|
||||
#endif // #if defined (B3_USE_SSE_IN_API) || defined (B3_USE_NEON)
|
||||
|
||||
/**@brief Add a vector to this one
|
||||
* @param The vector to add to this one */
|
||||
SIMD_FORCE_INLINE b3Vector3& operator+=(const b3Vector3& v)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
mVec128 = _mm_add_ps(mVec128, v.mVec128);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
mVec128 = vaddq_f32(mVec128, v.mVec128);
|
||||
#else
|
||||
m_floats[0] += v.m_floats[0];
|
||||
@@ -177,9 +177,9 @@ public:
|
||||
* @param The vector to subtract */
|
||||
SIMD_FORCE_INLINE b3Vector3& operator-=(const b3Vector3& v)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
mVec128 = _mm_sub_ps(mVec128, v.mVec128);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
mVec128 = vsubq_f32(mVec128, v.mVec128);
|
||||
#else
|
||||
m_floats[0] -= v.m_floats[0];
|
||||
@@ -193,11 +193,11 @@ public:
|
||||
* @param s Scale factor */
|
||||
SIMD_FORCE_INLINE b3Vector3& operator*=(const b3Scalar& s)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 vs = _mm_load_ss(&s); // (S 0 0 0)
|
||||
vs = bt_pshufd_ps(vs, 0x80); // (S S S 0.0)
|
||||
vs = b3_pshufd_ps(vs, 0x80); // (S S S 0.0)
|
||||
mVec128 = _mm_mul_ps(mVec128, vs);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
mVec128 = vmulq_n_f32(mVec128, s);
|
||||
#else
|
||||
m_floats[0] *= s;
|
||||
@@ -211,13 +211,13 @@ public:
|
||||
* @param s Scale factor to divide by */
|
||||
SIMD_FORCE_INLINE b3Vector3& operator/=(const b3Scalar& s)
|
||||
{
|
||||
btFullAssert(s != b3Scalar(0.0));
|
||||
b3FullAssert(s != b3Scalar(0.0));
|
||||
|
||||
#if 0 //defined(BT_USE_SSE_IN_API)
|
||||
#if 0 //defined(B3_USE_SSE_IN_API)
|
||||
// this code is not faster !
|
||||
__m128 vs = _mm_load_ss(&s);
|
||||
vs = _mm_div_ss(v1110, vs);
|
||||
vs = bt_pshufd_ps(vs, 0x00); // (S S S S)
|
||||
vs = b3_pshufd_ps(vs, 0x00); // (S S S S)
|
||||
|
||||
mVec128 = _mm_mul_ps(mVec128, vs);
|
||||
|
||||
@@ -231,14 +231,14 @@ public:
|
||||
* @param v The other vector in the dot product */
|
||||
SIMD_FORCE_INLINE b3Scalar dot(const b3Vector3& v) const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 vd = _mm_mul_ps(mVec128, v.mVec128);
|
||||
__m128 z = _mm_movehl_ps(vd, vd);
|
||||
__m128 y = _mm_shuffle_ps(vd, vd, 0x55);
|
||||
vd = _mm_add_ss(vd, y);
|
||||
vd = _mm_add_ss(vd, z);
|
||||
return _mm_cvtss_f32(vd);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
float32x4_t vd = vmulq_f32(mVec128, v.mVec128);
|
||||
float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_low_f32(vd));
|
||||
x = vadd_f32(x, vget_high_f32(vd));
|
||||
@@ -259,7 +259,7 @@ public:
|
||||
/**@brief Return the length of the vector */
|
||||
SIMD_FORCE_INLINE b3Scalar length() const
|
||||
{
|
||||
return btSqrt(length2());
|
||||
return b3Sqrt(length2());
|
||||
}
|
||||
|
||||
/**@brief Return the distance squared between the ends of this and another vector
|
||||
@@ -287,7 +287,7 @@ public:
|
||||
* x^2 + y^2 + z^2 = 1 */
|
||||
SIMD_FORCE_INLINE b3Vector3& normalize()
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
// dot product first
|
||||
__m128 vd = _mm_mul_ps(mVec128, mVec128);
|
||||
__m128 z = _mm_movehl_ps(vd, vd);
|
||||
@@ -298,7 +298,7 @@ public:
|
||||
#if 0
|
||||
vd = _mm_sqrt_ss(vd);
|
||||
vd = _mm_div_ss(v1110, vd);
|
||||
vd = bt_splat_ps(vd, 0x80);
|
||||
vd = b3_splat_ps(vd, 0x80);
|
||||
mVec128 = _mm_mul_ps(mVec128, vd);
|
||||
#else
|
||||
|
||||
@@ -315,7 +315,7 @@ public:
|
||||
|
||||
y = _mm_mul_ss(y, z); // y0 * (1.5 - vd * 0.5 * y0 * y0)
|
||||
|
||||
y = bt_splat_ps(y, 0x80);
|
||||
y = b3_splat_ps(y, 0x80);
|
||||
mVec128 = _mm_mul_ps(mVec128, y);
|
||||
|
||||
#endif
|
||||
@@ -339,23 +339,23 @@ public:
|
||||
* @param v The other vector */
|
||||
SIMD_FORCE_INLINE b3Scalar angle(const b3Vector3& v) const
|
||||
{
|
||||
b3Scalar s = btSqrt(length2() * v.length2());
|
||||
btFullAssert(s != b3Scalar(0.0));
|
||||
return btAcos(dot(v) / s);
|
||||
b3Scalar s = b3Sqrt(length2() * v.length2());
|
||||
b3FullAssert(s != b3Scalar(0.0));
|
||||
return b3Acos(dot(v) / s);
|
||||
}
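// Illustrative sketch (hypothetical helper, assuming the three-component b3Vector3 constructor
// used elsewhere in this file): angle() is dot(v) / (|a||b|) passed through b3Acos, so two
// perpendicular unit axes give SIMD_HALF_PI.
static b3Scalar rightAngleExample()
{
	b3Vector3 xAxis(b3Scalar(1.), b3Scalar(0.), b3Scalar(0.));
	b3Vector3 yAxis(b3Scalar(0.), b3Scalar(1.), b3Scalar(0.));
	return xAxis.angle(yAxis);   // acos(0), i.e. 90 degrees
}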
|
||||
|
||||
/**@brief Return a vector will the absolute values of each element */
|
||||
SIMD_FORCE_INLINE b3Vector3 absolute() const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
return b3Vector3(_mm_and_ps(mVec128, btv3AbsfMask));
|
||||
#elif defined(BT_USE_NEON)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
return b3Vector3(_mm_and_ps(mVec128, b3v3AbsfMask));
|
||||
#elif defined(B3_USE_NEON)
|
||||
return b3Vector3(vabsq_f32(mVec128));
|
||||
#else
|
||||
return b3Vector3(
|
||||
btFabs(m_floats[0]),
|
||||
btFabs(m_floats[1]),
|
||||
btFabs(m_floats[2]));
|
||||
b3Fabs(m_floats[0]),
|
||||
b3Fabs(m_floats[1]),
|
||||
b3Fabs(m_floats[2]));
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -363,19 +363,19 @@ public:
|
||||
* @param v The other vector */
|
||||
SIMD_FORCE_INLINE b3Vector3 cross(const b3Vector3& v) const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 T, V;
|
||||
|
||||
T = bt_pshufd_ps(mVec128, BT_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
V = bt_pshufd_ps(v.mVec128, BT_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
T = b3_pshufd_ps(mVec128, B3_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
V = b3_pshufd_ps(v.mVec128, B3_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
|
||||
V = _mm_mul_ps(V, mVec128);
|
||||
T = _mm_mul_ps(T, v.mVec128);
|
||||
V = _mm_sub_ps(V, T);
|
||||
|
||||
V = bt_pshufd_ps(V, BT_SHUFFLE(1, 2, 0, 3));
|
||||
V = b3_pshufd_ps(V, B3_SHUFFLE(1, 2, 0, 3));
|
||||
return b3Vector3(V);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
float32x4_t T, V;
|
||||
// form (Y, Z, X, _) of mVec128 and v.mVec128
|
||||
float32x2_t Tlow = vget_low_f32(mVec128);
|
||||
@@ -389,7 +389,7 @@ public:
|
||||
Vlow = vget_low_f32(V);
|
||||
// form (Y, Z, X, _);
|
||||
V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);
|
||||
V = (float32x4_t)vandq_s32((int32x4_t)V, btvFFF0Mask);
|
||||
V = (float32x4_t)vandq_s32((int32x4_t)V, b3vFFF0Mask);
|
||||
|
||||
return b3Vector3(V);
|
||||
#else
|
||||
@@ -402,16 +402,16 @@ public:
|
||||
|
||||
SIMD_FORCE_INLINE b3Scalar triple(const b3Vector3& v1, const b3Vector3& v2) const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
// cross:
|
||||
__m128 T = _mm_shuffle_ps(v1.mVec128, v1.mVec128, BT_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
__m128 V = _mm_shuffle_ps(v2.mVec128, v2.mVec128, BT_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
__m128 T = _mm_shuffle_ps(v1.mVec128, v1.mVec128, B3_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
__m128 V = _mm_shuffle_ps(v2.mVec128, v2.mVec128, B3_SHUFFLE(1, 2, 0, 3)); // (Y Z X 0)
|
||||
|
||||
V = _mm_mul_ps(V, v1.mVec128);
|
||||
T = _mm_mul_ps(T, v2.mVec128);
|
||||
V = _mm_sub_ps(V, T);
|
||||
|
||||
V = _mm_shuffle_ps(V, V, BT_SHUFFLE(1, 2, 0, 3));
|
||||
V = _mm_shuffle_ps(V, V, B3_SHUFFLE(1, 2, 0, 3));
|
||||
|
||||
// dot:
|
||||
V = _mm_mul_ps(V, mVec128);
|
||||
@@ -421,7 +421,7 @@ public:
|
||||
V = _mm_add_ss(V, z);
|
||||
return _mm_cvtss_f32(V);
|
||||
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
// cross:
|
||||
float32x4_t T, V;
|
||||
// form (Y, Z, X, _) of mVec128 and v.mVec128
|
||||
@@ -477,17 +477,17 @@ public:
|
||||
|
||||
SIMD_FORCE_INLINE void setInterpolate3(const b3Vector3& v0, const b3Vector3& v1, b3Scalar rt)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 vrt = _mm_load_ss(&rt); // (rt 0 0 0)
|
||||
b3Scalar s = b3Scalar(1.0) - rt;
|
||||
__m128 vs = _mm_load_ss(&s); // (S 0 0 0)
|
||||
vs = bt_pshufd_ps(vs, 0x80); // (S S S 0.0)
|
||||
vs = b3_pshufd_ps(vs, 0x80); // (S S S 0.0)
|
||||
__m128 r0 = _mm_mul_ps(v0.mVec128, vs);
|
||||
vrt = bt_pshufd_ps(vrt, 0x80); // (rt rt rt 0.0)
|
||||
vrt = b3_pshufd_ps(vrt, 0x80); // (rt rt rt 0.0)
|
||||
__m128 r1 = _mm_mul_ps(v1.mVec128, vrt);
|
||||
__m128 tmp3 = _mm_add_ps(r0,r1);
|
||||
mVec128 = tmp3;
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
mVec128 = vsubq_f32(v1.mVec128, v0.mVec128);
|
||||
mVec128 = vmulq_n_f32(mVec128, rt);
|
||||
mVec128 = vaddq_f32(mVec128, v0.mVec128);
|
||||
@@ -506,15 +506,15 @@ public:
|
||||
* @param t The ration of this to v (t = 0 => return this, t=1 => return other) */
|
||||
SIMD_FORCE_INLINE b3Vector3 lerp(const b3Vector3& v, const b3Scalar& t) const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 vt = _mm_load_ss(&t); // (t 0 0 0)
|
||||
vt = bt_pshufd_ps(vt, 0x80); // (rt rt rt 0.0)
|
||||
vt = b3_pshufd_ps(vt, 0x80); // (rt rt rt 0.0)
|
||||
__m128 vl = _mm_sub_ps(v.mVec128, mVec128);
|
||||
vl = _mm_mul_ps(vl, vt);
|
||||
vl = _mm_add_ps(vl, mVec128);
|
||||
|
||||
return b3Vector3(vl);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
float32x4_t vl = vsubq_f32(v.mVec128, mVec128);
|
||||
vl = vmulq_n_f32(vl, t);
|
||||
vl = vaddq_f32(vl, mVec128);
|
||||
@@ -532,9 +532,9 @@ public:
|
||||
* @param v The other vector */
|
||||
SIMD_FORCE_INLINE b3Vector3& operator*=(const b3Vector3& v)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
mVec128 = _mm_mul_ps(mVec128, v.mVec128);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
mVec128 = vmulq_f32(mVec128, v.mVec128);
|
||||
#else
|
||||
m_floats[0] *= v.m_floats[0];
|
||||
@@ -570,7 +570,7 @@ public:
|
||||
|
||||
SIMD_FORCE_INLINE bool operator==(const b3Vector3& other) const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
return (0xf == _mm_movemask_ps((__m128)_mm_cmpeq_ps(mVec128, other.mVec128)));
|
||||
#else
|
||||
return ((m_floats[3]==other.m_floats[3]) &&
|
||||
@@ -590,15 +590,15 @@ public:
|
||||
*/
|
||||
SIMD_FORCE_INLINE void setMax(const b3Vector3& other)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
mVec128 = _mm_max_ps(mVec128, other.mVec128);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
mVec128 = vmaxq_f32(mVec128, other.mVec128);
|
||||
#else
|
||||
btSetMax(m_floats[0], other.m_floats[0]);
|
||||
btSetMax(m_floats[1], other.m_floats[1]);
|
||||
btSetMax(m_floats[2], other.m_floats[2]);
|
||||
btSetMax(m_floats[3], other.m_floats[3]);
|
||||
b3SetMax(m_floats[0], other.m_floats[0]);
|
||||
b3SetMax(m_floats[1], other.m_floats[1]);
|
||||
b3SetMax(m_floats[2], other.m_floats[2]);
|
||||
b3SetMax(m_floats[3], other.m_floats[3]);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -607,15 +607,15 @@ public:
|
||||
*/
|
||||
SIMD_FORCE_INLINE void setMin(const b3Vector3& other)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
mVec128 = _mm_min_ps(mVec128, other.mVec128);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
mVec128 = vminq_f32(mVec128, other.mVec128);
|
||||
#else
|
||||
btSetMin(m_floats[0], other.m_floats[0]);
|
||||
btSetMin(m_floats[1], other.m_floats[1]);
|
||||
btSetMin(m_floats[2], other.m_floats[2]);
|
||||
btSetMin(m_floats[3], other.m_floats[3]);
|
||||
b3SetMin(m_floats[0], other.m_floats[0]);
|
||||
b3SetMin(m_floats[1], other.m_floats[1]);
|
||||
b3SetMin(m_floats[2], other.m_floats[2]);
|
||||
b3SetMin(m_floats[3], other.m_floats[3]);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -629,10 +629,10 @@ public:
|
||||
|
||||
void getSkewSymmetricMatrix(b3Vector3* v0,b3Vector3* v1,b3Vector3* v2) const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
|
||||
__m128 V = _mm_and_ps(mVec128, btvFFF0fMask);
|
||||
__m128 V0 = _mm_xor_ps(btvMzeroMask, V);
|
||||
__m128 V = _mm_and_ps(mVec128, b3vFFF0fMask);
|
||||
__m128 V0 = _mm_xor_ps(b3vMzeroMask, V);
|
||||
__m128 V2 = _mm_movelh_ps(V0, V);
|
||||
|
||||
__m128 V1 = _mm_shuffle_ps(V, V0, 0xCE);
|
||||
@@ -652,9 +652,9 @@ public:
|
||||
|
||||
void setZero()
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
mVec128 = (__m128)_mm_xor_ps(mVec128, mVec128);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
int32x4_t vi = vdupq_n_s32(0);
|
||||
mVec128 = vreinterpretq_f32_s32(vi);
|
||||
#else
|
||||
@@ -672,17 +672,17 @@ public:
|
||||
return length2() < SIMD_EPSILON;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void serialize(struct btVector3Data& dataOut) const;
|
||||
SIMD_FORCE_INLINE void serialize(struct b3Vector3Data& dataOut) const;
|
||||
|
||||
SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn);
|
||||
SIMD_FORCE_INLINE void deSerialize(const struct b3Vector3Data& dataIn);
|
||||
|
||||
SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData& dataOut) const;
|
||||
SIMD_FORCE_INLINE void serializeFloat(struct b3Vector3FloatData& dataOut) const;
|
||||
|
||||
SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn);
|
||||
SIMD_FORCE_INLINE void deSerializeFloat(const struct b3Vector3FloatData& dataIn);
|
||||
|
||||
SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData& dataOut) const;
|
||||
SIMD_FORCE_INLINE void serializeDouble(struct b3Vector3DoubleData& dataOut) const;
|
||||
|
||||
SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn);
|
||||
SIMD_FORCE_INLINE void deSerializeDouble(const struct b3Vector3DoubleData& dataIn);
|
||||
|
||||
/**@brief returns index of maximum dot product between this and vectors in array[]
|
||||
* @param array The other vectors
|
||||
@@ -699,7 +699,7 @@ public:
|
||||
/* create a vector as b3Vector3( this->dot( b3Vector3 v0 ), this->dot( b3Vector3 v1), this->dot( b3Vector3 v2 )) */
|
||||
SIMD_FORCE_INLINE b3Vector3 dot3( const b3Vector3 &v0, const b3Vector3 &v1, const b3Vector3 &v2 ) const
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
|
||||
__m128 a0 = _mm_mul_ps( v0.mVec128, this->mVec128 );
|
||||
__m128 a1 = _mm_mul_ps( v1.mVec128, this->mVec128 );
|
||||
@@ -709,11 +709,11 @@ public:
|
||||
__m128 b2 = _mm_unpacklo_ps( a2, _mm_setzero_ps() );
|
||||
__m128 r = _mm_movelh_ps( b0, b2 );
|
||||
r = _mm_add_ps( r, _mm_movehl_ps( b2, b0 ));
|
||||
a2 = _mm_and_ps( a2, btvxyzMaskf);
|
||||
r = _mm_add_ps( r, btCastdTo128f (_mm_move_sd( btCastfTo128d(a2), btCastfTo128d(b1) )));
|
||||
a2 = _mm_and_ps( a2, b3vxyzMaskf);
|
||||
r = _mm_add_ps( r, b3CastdTo128f (_mm_move_sd( b3CastfTo128d(a2), b3CastfTo128d(b1) )));
|
||||
return b3Vector3(r);
|
||||
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
static const uint32x4_t xyzMask = (const uint32x4_t){ -1, -1, -1, 0 };
|
||||
float32x4_t a0 = vmulq_f32( v0.mVec128, this->mVec128);
|
||||
float32x4_t a1 = vmulq_f32( v1.mVec128, this->mVec128);
|
||||
@@ -733,9 +733,9 @@ public:
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator+(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
return b3Vector3(_mm_add_ps(v1.mVec128, v2.mVec128));
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
return b3Vector3(vaddq_f32(v1.mVec128, v2.mVec128));
|
||||
#else
|
||||
return b3Vector3(
|
||||
@@ -749,9 +749,9 @@ operator+(const b3Vector3& v1, const b3Vector3& v2)
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator*(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
return b3Vector3(_mm_mul_ps(v1.mVec128, v2.mVec128));
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
return b3Vector3(vmulq_f32(v1.mVec128, v2.mVec128));
|
||||
#else
|
||||
return b3Vector3(
|
||||
@@ -765,14 +765,14 @@ operator*(const b3Vector3& v1, const b3Vector3& v2)
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator-(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
|
||||
#if (defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE))
|
||||
|
||||
// without _mm_and_ps this code causes slowdown in Concave moving
|
||||
__m128 r = _mm_sub_ps(v1.mVec128, v2.mVec128);
|
||||
return b3Vector3(_mm_and_ps(r, btvFFF0fMask));
|
||||
#elif defined(BT_USE_NEON)
|
||||
return b3Vector3(_mm_and_ps(r, b3vFFF0fMask));
|
||||
#elif defined(B3_USE_NEON)
|
||||
float32x4_t r = vsubq_f32(v1.mVec128, v2.mVec128);
|
||||
return b3Vector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
|
||||
return b3Vector3((float32x4_t)vandq_s32((int32x4_t)r, b3vFFF0Mask));
|
||||
#else
|
||||
return b3Vector3(
|
||||
v1.m_floats[0] - v2.m_floats[0],
|
||||
@@ -785,11 +785,11 @@ operator-(const b3Vector3& v1, const b3Vector3& v2)
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator-(const b3Vector3& v)
|
||||
{
|
||||
#if (defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
|
||||
__m128 r = _mm_xor_ps(v.mVec128, btvMzeroMask);
|
||||
return b3Vector3(_mm_and_ps(r, btvFFF0fMask));
|
||||
#elif defined(BT_USE_NEON)
|
||||
return b3Vector3((btSimdFloat4)veorq_s32((int32x4_t)v.mVec128, (int32x4_t)btvMzeroMask));
|
||||
#if (defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE))
|
||||
__m128 r = _mm_xor_ps(v.mVec128, b3vMzeroMask);
|
||||
return b3Vector3(_mm_and_ps(r, b3vFFF0fMask));
|
||||
#elif defined(B3_USE_NEON)
|
||||
return b3Vector3((b3SimdFloat4)veorq_s32((int32x4_t)v.mVec128, (int32x4_t)b3vMzeroMask));
|
||||
#else
|
||||
return b3Vector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]);
|
||||
#endif
|
||||
@@ -799,13 +799,13 @@ operator-(const b3Vector3& v)
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator*(const b3Vector3& v, const b3Scalar& s)
|
||||
{
|
||||
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
|
||||
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
|
||||
__m128 vs = _mm_load_ss(&s); // (S 0 0 0)
|
||||
vs = bt_pshufd_ps(vs, 0x80); // (S S S 0.0)
|
||||
vs = b3_pshufd_ps(vs, 0x80); // (S S S 0.0)
|
||||
return b3Vector3(_mm_mul_ps(v.mVec128, vs));
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
float32x4_t r = vmulq_n_f32(v.mVec128, s);
|
||||
return b3Vector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
|
||||
return b3Vector3((float32x4_t)vandq_s32((int32x4_t)r, b3vFFF0Mask));
|
||||
#else
|
||||
return b3Vector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s);
|
||||
#endif
|
||||
@@ -822,12 +822,12 @@ operator*(const b3Scalar& s, const b3Vector3& v)
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator/(const b3Vector3& v, const b3Scalar& s)
|
||||
{
|
||||
btFullAssert(s != b3Scalar(0.0));
|
||||
#if 0 //defined(BT_USE_SSE_IN_API)
|
||||
b3FullAssert(s != b3Scalar(0.0));
|
||||
#if 0 //defined(B3_USE_SSE_IN_API)
|
||||
// this code is not faster !
|
||||
__m128 vs = _mm_load_ss(&s);
|
||||
vs = _mm_div_ss(v1110, vs);
|
||||
vs = bt_pshufd_ps(vs, 0x00); // (S S S S)
|
||||
vs = b3_pshufd_ps(vs, 0x00); // (S S S S)
|
||||
|
||||
return b3Vector3(_mm_mul_ps(v.mVec128, vs));
|
||||
#else
|
||||
@@ -839,11 +839,11 @@ operator/(const b3Vector3& v, const b3Scalar& s)
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
operator/(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
#if (defined(BT_USE_SSE_IN_API)&& defined (BT_USE_SSE))
|
||||
#if (defined(B3_USE_SSE_IN_API)&& defined (B3_USE_SSE))
|
||||
__m128 vec = _mm_div_ps(v1.mVec128, v2.mVec128);
|
||||
vec = _mm_and_ps(vec, btvFFF0fMask);
|
||||
vec = _mm_and_ps(vec, b3vFFF0fMask);
|
||||
return b3Vector3(vec);
|
||||
#elif defined(BT_USE_NEON)
|
||||
#elif defined(B3_USE_NEON)
|
||||
float32x4_t x, y, v, m;
|
||||
|
||||
x = v1.mVec128;
|
||||
@@ -867,7 +867,7 @@ operator/(const b3Vector3& v1, const b3Vector3& v2)
|
||||
|
||||
/**@brief Return the dot product between two vectors */
|
||||
SIMD_FORCE_INLINE b3Scalar
|
||||
btDot(const b3Vector3& v1, const b3Vector3& v2)
|
||||
b3Dot(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
return v1.dot(v2);
|
||||
}
|
||||
@@ -875,7 +875,7 @@ btDot(const b3Vector3& v1, const b3Vector3& v2)
|
||||
|
||||
/**@brief Return the distance squared between two vectors */
|
||||
SIMD_FORCE_INLINE b3Scalar
|
||||
btDistance2(const b3Vector3& v1, const b3Vector3& v2)
|
||||
b3Distance2(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
return v1.distance2(v2);
|
||||
}
|
||||
@@ -883,27 +883,27 @@ btDistance2(const b3Vector3& v1, const b3Vector3& v2)
|
||||
|
||||
/**@brief Return the distance between two vectors */
|
||||
SIMD_FORCE_INLINE b3Scalar
|
||||
btDistance(const b3Vector3& v1, const b3Vector3& v2)
|
||||
b3Distance(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
return v1.distance(v2);
|
||||
}
|
||||
|
||||
/**@brief Return the angle between two vectors */
|
||||
SIMD_FORCE_INLINE b3Scalar
|
||||
btAngle(const b3Vector3& v1, const b3Vector3& v2)
|
||||
b3Angle(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
return v1.angle(v2);
|
||||
}
|
||||
|
||||
/**@brief Return the cross product of two vectors */
|
||||
SIMD_FORCE_INLINE b3Vector3
|
||||
btCross(const b3Vector3& v1, const b3Vector3& v2)
|
||||
b3Cross(const b3Vector3& v1, const b3Vector3& v2)
|
||||
{
|
||||
return v1.cross(v2);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE b3Scalar
|
||||
btTriple(const b3Vector3& v1, const b3Vector3& v2, const b3Vector3& v3)
|
||||
b3Triple(const b3Vector3& v1, const b3Vector3& v2, const b3Vector3& v3)
|
||||
{
|
||||
return v1.triple(v2, v3);
|
||||
}
@@ -932,7 +932,7 @@ SIMD_FORCE_INLINE b3Scalar b3Vector3::distance(const b3Vector3& v) const

SIMD_FORCE_INLINE b3Vector3 b3Vector3::normalized() const
{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
b3Vector3 norm = *this;

return norm.normalize();
@@ -945,23 +945,23 @@ SIMD_FORCE_INLINE b3Vector3 b3Vector3::rotate( const b3Vector3& wAxis, const b3S
{
// wAxis must be a unit lenght vector

#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)

__m128 O = _mm_mul_ps(wAxis.mVec128, mVec128);
b3Scalar ssin = btSin( _angle );
b3Scalar ssin = b3Sin( _angle );
__m128 C = wAxis.cross( mVec128 ).mVec128;
O = _mm_and_ps(O, btvFFF0fMask);
b3Scalar scos = btCos( _angle );
O = _mm_and_ps(O, b3vFFF0fMask);
b3Scalar scos = b3Cos( _angle );

__m128 vsin = _mm_load_ss(&ssin); // (S 0 0 0)
__m128 vcos = _mm_load_ss(&scos); // (S 0 0 0)

__m128 Y = bt_pshufd_ps(O, 0xC9); // (Y Z X 0)
__m128 Z = bt_pshufd_ps(O, 0xD2); // (Z X Y 0)
__m128 Y = b3_pshufd_ps(O, 0xC9); // (Y Z X 0)
__m128 Z = b3_pshufd_ps(O, 0xD2); // (Z X Y 0)
O = _mm_add_ps(O, Y);
vsin = bt_pshufd_ps(vsin, 0x80); // (S S S 0)
vsin = b3_pshufd_ps(vsin, 0x80); // (S S S 0)
O = _mm_add_ps(O, Z);
vcos = bt_pshufd_ps(vcos, 0x80); // (S S S 0)
vcos = b3_pshufd_ps(vcos, 0x80); // (S S S 0)

vsin = vsin * C;
O = O * wAxis.mVec128;
@@ -979,24 +979,24 @@ SIMD_FORCE_INLINE b3Vector3 b3Vector3::rotate( const b3Vector3& wAxis, const b3S

_y = wAxis.cross( *this );

return ( o + _x * btCos( _angle ) + _y * btSin( _angle ) );
return ( o + _x * b3Cos( _angle ) + _y * b3Sin( _angle ) );
#endif
}

SIMD_FORCE_INLINE long b3Vector3::maxDot( const b3Vector3 *array, long array_count, b3Scalar &dotOut ) const
{
#if defined (BT_USE_SSE) || defined (BT_USE_NEON)
#if defined _WIN32 || defined (BT_USE_SSE)
#if defined (B3_USE_SSE) || defined (B3_USE_NEON)
#if defined _WIN32 || defined (B3_USE_SSE)
const long scalar_cutoff = 10;
long _maxdot_large( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#elif defined BT_USE_NEON
#elif defined B3_USE_NEON
const long scalar_cutoff = 4;
extern long (*_maxdot_large)( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#endif
if( array_count < scalar_cutoff )
#else

#endif//BT_USE_SSE || BT_USE_NEON
#endif//B3_USE_SSE || B3_USE_NEON
{
b3Scalar maxDot = -SIMD_INFINITY;
int i = 0;
@@ -1015,18 +1015,18 @@ SIMD_FORCE_INLINE long b3Vector3::maxDot( const b3Vector3 *array, long arra
dotOut = maxDot;
return ptIndex;
}
#if defined (BT_USE_SSE) || defined (BT_USE_NEON)
#if defined (B3_USE_SSE) || defined (B3_USE_NEON)
return _maxdot_large( (float*) array, (float*) &m_floats[0], array_count, &dotOut );
#endif
}

SIMD_FORCE_INLINE long b3Vector3::minDot( const b3Vector3 *array, long array_count, b3Scalar &dotOut ) const
{
#if defined (BT_USE_SSE) || defined (BT_USE_NEON)
#if defined BT_USE_SSE
#if defined (B3_USE_SSE) || defined (B3_USE_NEON)
#if defined B3_USE_SSE
const long scalar_cutoff = 10;
long _mindot_large( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#elif defined BT_USE_NEON
#elif defined B3_USE_NEON
const long scalar_cutoff = 4;
extern long (*_mindot_large)( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#else
@@ -1034,7 +1034,7 @@ SIMD_FORCE_INLINE long b3Vector3::minDot( const b3Vector3 *array, long arra
#endif

if( array_count < scalar_cutoff )
#endif//BT_USE_SSE || BT_USE_NEON
#endif//B3_USE_SSE || B3_USE_NEON
{
b3Scalar minDot = SIMD_INFINITY;
int i = 0;
@@ -1055,56 +1055,56 @@ SIMD_FORCE_INLINE long b3Vector3::minDot( const b3Vector3 *array, long arra

return ptIndex;
}
#if defined (BT_USE_SSE) || defined (BT_USE_NEON)
#if defined (B3_USE_SSE) || defined (B3_USE_NEON)
return _mindot_large( (float*) array, (float*) &m_floats[0], array_count, &dotOut );
#endif
}
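
A small caller-side sketch of the maxDot helper declared above, e.g. for picking a support vertex along a direction; the wrapper function name and include path are illustrative assumptions:

#include "b3Vector3.h" // assumed path; point this at the renamed header

// Returns the index of the vertex with the largest dot product against dir.
long supportIndex(const b3Vector3* vertices, long count, const b3Vector3& dir)
{
    b3Scalar bestDot;                            // receives the winning dot product
    return dir.maxDot(vertices, count, bestDot); // minDot works the same way for the smallest value
}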


class btVector4 : public b3Vector3
class b3Vector4 : public b3Vector3
{
public:

SIMD_FORCE_INLINE btVector4() {}
SIMD_FORCE_INLINE b3Vector4() {}


SIMD_FORCE_INLINE btVector4(const b3Scalar& _x, const b3Scalar& _y, const b3Scalar& _z,const b3Scalar& _w)
SIMD_FORCE_INLINE b3Vector4(const b3Scalar& _x, const b3Scalar& _y, const b3Scalar& _z,const b3Scalar& _w)
: b3Vector3(_x,_y,_z)
{
m_floats[3] = _w;
}

#if (defined (BT_USE_SSE_IN_API)&& defined (BT_USE_SSE)) || defined (BT_USE_NEON)
SIMD_FORCE_INLINE btVector4(const btSimdFloat4 vec)
#if (defined (B3_USE_SSE_IN_API)&& defined (B3_USE_SSE)) || defined (B3_USE_NEON)
SIMD_FORCE_INLINE b3Vector4(const b3SimdFloat4 vec)
{
mVec128 = vec;
}

SIMD_FORCE_INLINE btVector4(const b3Vector3& rhs)
SIMD_FORCE_INLINE b3Vector4(const b3Vector3& rhs)
{
mVec128 = rhs.mVec128;
}

SIMD_FORCE_INLINE btVector4&
operator=(const btVector4& v)
SIMD_FORCE_INLINE b3Vector4&
operator=(const b3Vector4& v)
{
mVec128 = v.mVec128;
return *this;
}
#endif // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)
#endif // #if defined (B3_USE_SSE_IN_API) || defined (B3_USE_NEON)

SIMD_FORCE_INLINE btVector4 absolute4() const
SIMD_FORCE_INLINE b3Vector4 absolute4() const
{
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
return btVector4(_mm_and_ps(mVec128, btvAbsfMask));
#elif defined(BT_USE_NEON)
return btVector4(vabsq_f32(mVec128));
#if defined(B3_USE_SSE_IN_API) && defined (B3_USE_SSE)
return b3Vector4(_mm_and_ps(mVec128, b3vAbsfMask));
#elif defined(B3_USE_NEON)
return b3Vector4(vabsq_f32(mVec128));
#else
return btVector4(
btFabs(m_floats[0]),
btFabs(m_floats[1]),
btFabs(m_floats[2]),
btFabs(m_floats[3]));
return b3Vector4(
b3Fabs(m_floats[0]),
b3Fabs(m_floats[1]),
b3Fabs(m_floats[2]),
b3Fabs(m_floats[3]));
#endif
}

@@ -1115,7 +1115,7 @@ public:
SIMD_FORCE_INLINE int maxAxis4() const
{
int maxIndex = -1;
b3Scalar maxVal = b3Scalar(-BT_LARGE_FLOAT);
b3Scalar maxVal = b3Scalar(-B3_LARGE_FLOAT);
if (m_floats[0] > maxVal)
{
maxIndex = 0;
@@ -1144,7 +1144,7 @@ public:
SIMD_FORCE_INLINE int minAxis4() const
{
int minIndex = -1;
b3Scalar minVal = b3Scalar(BT_LARGE_FLOAT);
b3Scalar minVal = b3Scalar(B3_LARGE_FLOAT);
if (m_floats[0] < minVal)
{
minIndex = 0;
@@ -1210,10 +1210,10 @@ public:
};


///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapScalarEndian(const b3Scalar& sourceVal, b3Scalar& destVal)
///b3SwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void b3SwapScalarEndian(const b3Scalar& sourceVal, b3Scalar& destVal)
{
#ifdef BT_USE_DOUBLE_PRECISION
#ifdef B3_USE_DOUBLE_PRECISION
unsigned char* dest = (unsigned char*) &destVal;
unsigned char* src = (unsigned char*) &sourceVal;
dest[0] = src[7];
@@ -1231,37 +1231,37 @@ SIMD_FORCE_INLINE void btSwapScalarEndian(const b3Scalar& sourceVal, b3Scalar& d
dest[1] = src[2];
dest[2] = src[1];
dest[3] = src[0];
#endif //BT_USE_DOUBLE_PRECISION
#endif //B3_USE_DOUBLE_PRECISION
}
///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapVector3Endian(const b3Vector3& sourceVec, b3Vector3& destVec)
///b3SwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void b3SwapVector3Endian(const b3Vector3& sourceVec, b3Vector3& destVec)
{
for (int i=0;i<4;i++)
{
btSwapScalarEndian(sourceVec[i],destVec[i]);
b3SwapScalarEndian(sourceVec[i],destVec[i]);
}

}

///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btUnSwapVector3Endian(b3Vector3& vector)
///b3UnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void b3UnSwapVector3Endian(b3Vector3& vector)
{

b3Vector3 swappedVec;
for (int i=0;i<4;i++)
{
btSwapScalarEndian(vector[i],swappedVec[i]);
b3SwapScalarEndian(vector[i],swappedVec[i]);
}
vector = swappedVec;
}
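
A minimal sketch of how the renamed endian helpers above could be paired around serialization; the round-trip wrapper itself is an assumption for illustration:

#include "b3Vector3.h" // assumed path; point this at the renamed header

void endianRoundTrip(const b3Vector3& in, b3Vector3& out)
{
    b3Vector3 wire;
    b3SwapVector3Endian(in, wire); // byte-swap each component before writing it out
    out = wire;
    b3UnSwapVector3Endian(out);    // swap back in place after reading, restoring the original bytes
}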

template <class T>
SIMD_FORCE_INLINE void btPlaneSpace1 (const T& n, T& p, T& q)
SIMD_FORCE_INLINE void b3PlaneSpace1 (const T& n, T& p, T& q)
{
if (btFabs(n[2]) > SIMDSQRT12) {
if (b3Fabs(n[2]) > SIMDSQRT12) {
// choose p in y-z plane
b3Scalar a = n[1]*n[1] + n[2]*n[2];
b3Scalar k = btRecipSqrt (a);
b3Scalar k = b3RecipSqrt (a);
p[0] = 0;
p[1] = -n[2]*k;
p[2] = n[1]*k;
@@ -1273,7 +1273,7 @@ SIMD_FORCE_INLINE void btPlaneSpace1 (const T& n, T& p, T& q)
else {
// choose p in x-y plane
b3Scalar a = n[0]*n[0] + n[1]*n[1];
b3Scalar k = btRecipSqrt (a);
b3Scalar k = b3RecipSqrt (a);
p[0] = -n[1]*k;
p[1] = n[0]*k;
p[2] = 0;
@@ -1285,56 +1285,56 @@ SIMD_FORCE_INLINE void btPlaneSpace1 (const T& n, T& p, T& q)
}


struct btVector3FloatData
struct b3Vector3FloatData
{
float m_floats[4];
};

struct btVector3DoubleData
struct b3Vector3DoubleData
{
double m_floats[4];

};

SIMD_FORCE_INLINE void b3Vector3::serializeFloat(struct btVector3FloatData& dataOut) const
SIMD_FORCE_INLINE void b3Vector3::serializeFloat(struct b3Vector3FloatData& dataOut) const
{
///could also do a memcpy, check if it is worth it
for (int i=0;i<4;i++)
dataOut.m_floats[i] = float(m_floats[i]);
}

SIMD_FORCE_INLINE void b3Vector3::deSerializeFloat(const struct btVector3FloatData& dataIn)
SIMD_FORCE_INLINE void b3Vector3::deSerializeFloat(const struct b3Vector3FloatData& dataIn)
{
for (int i=0;i<4;i++)
m_floats[i] = b3Scalar(dataIn.m_floats[i]);
}


SIMD_FORCE_INLINE void b3Vector3::serializeDouble(struct btVector3DoubleData& dataOut) const
SIMD_FORCE_INLINE void b3Vector3::serializeDouble(struct b3Vector3DoubleData& dataOut) const
{
///could also do a memcpy, check if it is worth it
for (int i=0;i<4;i++)
dataOut.m_floats[i] = double(m_floats[i]);
}

SIMD_FORCE_INLINE void b3Vector3::deSerializeDouble(const struct btVector3DoubleData& dataIn)
SIMD_FORCE_INLINE void b3Vector3::deSerializeDouble(const struct b3Vector3DoubleData& dataIn)
{
for (int i=0;i<4;i++)
m_floats[i] = b3Scalar(dataIn.m_floats[i]);
}


SIMD_FORCE_INLINE void b3Vector3::serialize(struct btVector3Data& dataOut) const
SIMD_FORCE_INLINE void b3Vector3::serialize(struct b3Vector3Data& dataOut) const
{
///could also do a memcpy, check if it is worth it
for (int i=0;i<4;i++)
dataOut.m_floats[i] = m_floats[i];
}

SIMD_FORCE_INLINE void b3Vector3::deSerialize(const struct btVector3Data& dataIn)
SIMD_FORCE_INLINE void b3Vector3::deSerialize(const struct b3Vector3Data& dataIn)
{
for (int i=0;i<4;i++)
m_floats[i] = dataIn.m_floats[i];
}

#endif //BT_VECTOR3_H
#endif //B3_VECTOR3_H
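
Finally, a minimal sketch of the float serialization round trip through the renamed b3Vector3FloatData struct; the include path and the sample values are assumptions:

#include "b3Vector3.h" // assumed path; point this at the renamed header

void serializeRoundTrip()
{
    b3Vector3 v(1, 2, 3);

    b3Vector3FloatData data;         // plain-old-data mirror used for file/network I/O
    v.serializeFloat(data);          // copies the four components into data.m_floats

    b3Vector3 restored;
    restored.deSerializeFloat(data); // reads them back into a live vector
}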