apply _clang-format to Bullet/src/LinearMath/btScalar.h; disable formatting around preprocessor defines

(this works around clang-format lacking preprocessor indentation support, see
https://bugs.llvm.org//show_bug.cgi?id=17362
and possibly apply this patch:
https://github.com/mkurdej/clang/tree/indent-pp-directives
)
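The workaround is mechanical: the hand-indented preprocessor region is wrapped in clang-format's off/on markers so the tool leaves it untouched. A minimal sketch of the guard pattern (in btScalar.h the guarded region runs from the BT_DEBUG block down to the BT_ONE/BT_HALF defines, as the diff below shows):

// clang and most formatting tools don't support indentation of preprocessor guards, so turn it off
// clang-format off
#if defined(DEBUG) || defined (_DEBUG)
#define BT_DEBUG
#endif
// clang-format on

The mkurdej branch linked above appears to be the work that later landed upstream as clang-format's IndentPPDirectives option; once that is available, the off/on guards could presumably be removed again.
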
Erwin Coumans committed 2017-03-13 11:17:42 -07:00
parent b088f7febe
commit bdaadf57e0


@@ -12,20 +12,16 @@ subject to the following restrictions:
 3. This notice may not be removed or altered from any source distribution.
 */
 #ifndef BT_SCALAR_H
 #define BT_SCALAR_H
 #ifdef BT_MANAGED_CODE
 //Aligned data types not supported in managed code
 #pragma unmanaged
 #endif
 #include <math.h>
-#include <stdlib.h>//size_t for MSVC 6.0
+#include <stdlib.h> //size_t for MSVC 6.0
 #include <float.h>
 /* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/
@@ -36,15 +32,14 @@ inline int btGetVersion()
 return BT_BULLET_VERSION;
 }
+// clang and most formatting tools don't support indentation of preprocessor guards, so turn it off
+// clang-format off
 #if defined(DEBUG) || defined (_DEBUG)
 #define BT_DEBUG
 #endif
 #ifdef _WIN32
 #if defined(__MINGW32__) || defined(__CYGWIN__) || (defined (_MSC_VER) && _MSC_VER < 1300)
 #define SIMD_FORCE_INLINE inline
 #define ATTRIBUTE_ALIGNED16(a) a
 #define ATTRIBUTE_ALIGNED64(a) a
@@ -54,7 +49,7 @@ inline int btGetVersion()
 #define ATTRIBUTE_ALIGNED16(a) __declspec() a
 #define ATTRIBUTE_ALIGNED64(a) __declspec() a
 #define ATTRIBUTE_ALIGNED128(a) __declspec () a
-#else
+#else//__MINGW32__
 //#define BT_HAS_ALIGNED_ALLOCATOR
 #pragma warning(disable : 4324) // disable padding warning
 // #pragma warning(disable:4530) // Disable the exception disable but used in MSCV Stl warning.
@@ -102,7 +97,7 @@ inline int btGetVersion()
 #endif //__MINGW32__
 #ifdef BT_DEBUG
 #ifdef _MSC_VER
 #include <stdio.h>
 #define btAssert(x) { if(!(x)){printf("Assert "__FILE__ ":%u (%s)\n", __LINE__, #x);__debugbreak(); }}
@@ -110,18 +105,18 @@ inline int btGetVersion()
 #include <assert.h>
 #define btAssert assert
 #endif//_MSC_VER
 #else
 #define btAssert(x)
 #endif
 //btFullAssert is optional, slows down a lot
 #define btFullAssert(x)
 #define btLikely(_c) _c
 #define btUnlikely(_c) _c
-#else
+#else//_WIN32
 #if defined (__CELLOS_LV2__)
 #define SIMD_FORCE_INLINE inline __attribute__((always_inline))
 #define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16)))
 #define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64)))
@@ -129,27 +124,27 @@ inline int btGetVersion()
 #ifndef assert
 #include <assert.h>
 #endif
 #ifdef BT_DEBUG
 #ifdef __SPU__
 #include <spu_printf.h>
 #define printf spu_printf
 #define btAssert(x) {if(!(x)){printf("Assert "__FILE__ ":%u ("#x")\n", __LINE__);spu_hcmpeq(0,0);}}
 #else
 #define btAssert assert
 #endif
-#else
+#else//BT_DEBUG
 #define btAssert(x)
-#endif
+#endif//BT_DEBUG
 //btFullAssert is optional, slows down a lot
 #define btFullAssert(x)
 #define btLikely(_c) _c
 #define btUnlikely(_c) _c
-#else
+#else//defined (__CELLOS_LV2__)
 #ifdef USE_LIBSPE2
 #define SIMD_FORCE_INLINE __inline
 #define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16)))
@@ -158,11 +153,11 @@ inline int btGetVersion()
 #ifndef assert
 #include <assert.h>
 #endif
 #ifdef BT_DEBUG
 #define btAssert assert
 #else
 #define btAssert(x)
 #endif
 //btFullAssert is optional, slows down a lot
 #define btFullAssert(x)
@@ -171,10 +166,10 @@ inline int btGetVersion()
 #define btUnlikely(_c) __builtin_expect((_c), 0)
-#else
+#else//USE_LIBSPE2
 //non-windows systems
 #if (defined (__APPLE__) && (!defined (BT_USE_DOUBLE_PRECISION)))
 #if defined (__i386__) || defined (__x86_64__)
 #define BT_USE_SIMD_VECTOR3
 #define BT_USE_SSE
@@ -205,7 +200,7 @@ inline int btGetVersion()
 #endif//__arm__
 #define SIMD_FORCE_INLINE inline __attribute__ ((always_inline))
 ///@todo: check out alignment methods for other platforms/compilers
 #define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16)))
 #define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64)))
 #define ATTRIBUTE_ALIGNED128(a) a __attribute__ ((aligned (128)))
@@ -236,7 +231,7 @@ inline int btGetVersion()
 #define btLikely(_c) _c
 #define btUnlikely(_c) _c
-#else
+#else//__APPLE__
 #define SIMD_FORCE_INLINE inline
 ///@todo: check out alignment methods for other platforms/compilers
@@ -250,105 +245,105 @@ inline int btGetVersion()
 #include <assert.h>
 #endif
 #if defined(DEBUG) || defined (_DEBUG)
 #define btAssert assert
 #else
 #define btAssert(x)
 #endif
 //btFullAssert is optional, slows down a lot
 #define btFullAssert(x)
 #define btLikely(_c) _c
 #define btUnlikely(_c) _c
 #endif //__APPLE__
 #endif // LIBSPE2
 #endif //__CELLOS_LV2__
-#endif
+#endif//_WIN32
 ///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
 #if defined(BT_USE_DOUBLE_PRECISION)
 typedef double btScalar;
 //this number could be bigger in double precision
 #define BT_LARGE_FLOAT 1e30
 #else
 typedef float btScalar;
 //keep BT_LARGE_FLOAT*BT_LARGE_FLOAT < FLT_MAX
 #define BT_LARGE_FLOAT 1e18f
 #endif
 #ifdef BT_USE_SSE
 typedef __m128 btSimdFloat4;
-#endif//BT_USE_SSE
-#if defined (BT_USE_SSE)
+#endif //BT_USE_SSE
+#if defined(BT_USE_SSE)
 //#if defined BT_USE_SSE_IN_API && defined (BT_USE_SSE)
 #ifdef _WIN32
 #ifndef BT_NAN
 static int btNanMask = 0x7F800001;
-#define BT_NAN (*(float*)&btNanMask)
+#define BT_NAN (*(float *)&btNanMask)
 #endif
 #ifndef BT_INFINITY
 static int btInfinityMask = 0x7F800000;
-#define BT_INFINITY (*(float*)&btInfinityMask)
-inline int btGetInfinityMask()//suppress stupid compiler warning
+#define BT_INFINITY (*(float *)&btInfinityMask)
+inline int btGetInfinityMask() //suppress stupid compiler warning
 {
 return btInfinityMask;
 }
 #endif
 //use this, in case there are clashes (such as xnamath.h)
 #ifndef BT_NO_SIMD_OPERATOR_OVERLOADS
-inline __m128 operator + (const __m128 A, const __m128 B)
+inline __m128 operator+(const __m128 A, const __m128 B)
 {
 return _mm_add_ps(A, B);
 }
-inline __m128 operator - (const __m128 A, const __m128 B)
+inline __m128 operator-(const __m128 A, const __m128 B)
 {
 return _mm_sub_ps(A, B);
 }
-inline __m128 operator * (const __m128 A, const __m128 B)
+inline __m128 operator*(const __m128 A, const __m128 B)
 {
 return _mm_mul_ps(A, B);
 }
 #endif //BT_NO_SIMD_OPERATOR_OVERLOADS
 #define btCastfTo128i(a) (_mm_castps_si128(a))
 #define btCastfTo128d(a) (_mm_castps_pd(a))
 #define btCastiTo128f(a) (_mm_castsi128_ps(a))
 #define btCastdTo128f(a) (_mm_castpd_ps(a))
 #define btCastdTo128i(a) (_mm_castpd_si128(a))
-#define btAssign128(r0,r1,r2,r3) _mm_setr_ps(r0,r1,r2,r3)
-#else//_WIN32
+#define btAssign128(r0, r1, r2, r3) _mm_setr_ps(r0, r1, r2, r3)
+#else //_WIN32
 #define btCastfTo128i(a) ((__m128i)(a))
 #define btCastfTo128d(a) ((__m128d)(a))
-#define btCastiTo128f(a) ((__m128) (a))
-#define btCastdTo128f(a) ((__m128) (a))
+#define btCastiTo128f(a) ((__m128)(a))
+#define btCastdTo128f(a) ((__m128)(a))
 #define btCastdTo128i(a) ((__m128i)(a))
-#define btAssign128(r0,r1,r2,r3) (__m128){r0,r1,r2,r3}
+#define btAssign128(r0, r1, r2, r3) \
+(__m128) { r0, r1, r2, r3 }
 #define BT_INFINITY INFINITY
 #define BT_NAN NAN
-#endif//_WIN32
-#else
+#endif //_WIN32
+#else//BT_USE_SSE
 #ifdef BT_USE_NEON
 #include <arm_neon.h>
 typedef float32x4_t btSimdFloat4;
 #define BT_INFINITY INFINITY
 #define BT_NAN NAN
-#define btAssign128(r0,r1,r2,r3) (float32x4_t){r0,r1,r2,r3}
-#else//BT_USE_NEON
+#define btAssign128(r0, r1, r2, r3) \
+(float32x4_t) { r0, r1, r2, r3 }
+#else //BT_USE_NEON
 #ifndef BT_INFINITY
 struct btInfMaskConverter
@@ -357,122 +352,132 @@ inline __m128 operator * (const __m128 A, const __m128 B)
 float mask;
 int intmask;
 };
-btInfMaskConverter(int mask=0x7F800000)
-:intmask(mask)
+btInfMaskConverter(int mask = 0x7F800000)
+: intmask(mask)
 {
 }
 };
 static btInfMaskConverter btInfinityMask = 0x7F800000;
 #define BT_INFINITY (btInfinityMask.mask)
-inline int btGetInfinityMask()//suppress stupid compiler warning
+inline int btGetInfinityMask() //suppress stupid compiler warning
 {
 return btInfinityMask.intmask;
 }
 #endif
-#endif//BT_USE_NEON
+#endif //BT_USE_NEON
 #endif //BT_USE_SSE
 #ifdef BT_USE_NEON
 #include <arm_neon.h>
 typedef float32x4_t btSimdFloat4;
 #define BT_INFINITY INFINITY
 #define BT_NAN NAN
-#define btAssign128(r0,r1,r2,r3) (float32x4_t){r0,r1,r2,r3}
-#endif
+#define btAssign128(r0, r1, r2, r3) \
+(float32x4_t) { r0, r1, r2, r3 }
+#endif//BT_USE_NEON
 #define BT_DECLARE_ALIGNED_ALLOCATOR() \
-SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes,16); } \
-SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \
-SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \
-SIMD_FORCE_INLINE void operator delete(void*, void*) { } \
-SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes,16); } \
-SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \
-SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \
-SIMD_FORCE_INLINE void operator delete[](void*, void*) { } \
+SIMD_FORCE_INLINE void *operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
+SIMD_FORCE_INLINE void operator delete(void *ptr) { btAlignedFree(ptr); } \
+SIMD_FORCE_INLINE void *operator new(size_t, void *ptr) { return ptr; } \
+SIMD_FORCE_INLINE void operator delete(void *, void *) {} \
+SIMD_FORCE_INLINE void *operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
+SIMD_FORCE_INLINE void operator delete[](void *ptr) { btAlignedFree(ptr); } \
+SIMD_FORCE_INLINE void *operator new[](size_t, void *ptr) { return ptr; } \
+SIMD_FORCE_INLINE void operator delete[](void *, void *) {}
 #if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS)
-SIMD_FORCE_INLINE btScalar btSqrt(btScalar x) { return sqrt(x); }
-SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); }
-SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); }
-SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); }
-SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); }
-SIMD_FORCE_INLINE btScalar btAcos(btScalar x) { if (x<btScalar(-1)) x=btScalar(-1); if (x>btScalar(1)) x=btScalar(1); return acos(x); }
-SIMD_FORCE_INLINE btScalar btAsin(btScalar x) { if (x<btScalar(-1)) x=btScalar(-1); if (x>btScalar(1)) x=btScalar(1); return asin(x); }
-SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); }
-SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); }
-SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); }
-SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); }
-SIMD_FORCE_INLINE btScalar btPow(btScalar x,btScalar y) { return pow(x,y); }
-SIMD_FORCE_INLINE btScalar btFmod(btScalar x,btScalar y) { return fmod(x,y); }
+SIMD_FORCE_INLINE btScalar btSqrt(btScalar x)
+{
+return sqrt(x);
+}
+SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); }
+SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); }
+SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); }
+SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); }
+SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
+{
+if (x < btScalar(-1)) x = btScalar(-1);
+if (x > btScalar(1)) x = btScalar(1);
+return acos(x);
+}
+SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
+{
+if (x < btScalar(-1)) x = btScalar(-1);
+if (x > btScalar(1)) x = btScalar(1);
+return asin(x);
+}
+SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); }
+SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); }
+SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); }
+SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); }
+SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return pow(x, y); }
+SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmod(x, y); }
-#else
+#else//BT_USE_DOUBLE_PRECISION
 SIMD_FORCE_INLINE btScalar btSqrt(btScalar y)
 {
 #ifdef USE_APPROXIMATION
 #ifdef __LP64__
-float xhalf = 0.5f*y;
-int i = *(int*)&y;
-i = 0x5f375a86 - (i>>1);
-y = *(float*)&i;
-y = y*(1.5f - xhalf*y*y);
-y = y*(1.5f - xhalf*y*y);
-y = y*(1.5f - xhalf*y*y);
-y=1/y;
+float xhalf = 0.5f * y;
+int i = *(int *)&y;
+i = 0x5f375a86 - (i >> 1);
+y = *(float *)&i;
+y = y * (1.5f - xhalf * y * y);
+y = y * (1.5f - xhalf * y * y);
+y = y * (1.5f - xhalf * y * y);
+y = 1 / y;
 return y;
 #else
 double x, z, tempf;
 unsigned long *tfptr = ((unsigned long *)&tempf) + 1;
 tempf = y;
-*tfptr = (0xbfcdd90a - *tfptr)>>1; /* estimate of 1/sqrt(y) */
+*tfptr = (0xbfcdd90a - *tfptr) >> 1; /* estimate of 1/sqrt(y) */
 x = tempf;
-z = y*btScalar(0.5);
-x = (btScalar(1.5)*x)-(x*x)*(x*z); /* iteration formula */
-x = (btScalar(1.5)*x)-(x*x)*(x*z);
-x = (btScalar(1.5)*x)-(x*x)*(x*z);
-x = (btScalar(1.5)*x)-(x*x)*(x*z);
-x = (btScalar(1.5)*x)-(x*x)*(x*z);
-return x*y;
+z = y * btScalar(0.5);
+x = (btScalar(1.5) * x) - (x * x) * (x * z); /* iteration formula */
+x = (btScalar(1.5) * x) - (x * x) * (x * z);
+x = (btScalar(1.5) * x) - (x * x) * (x * z);
+x = (btScalar(1.5) * x) - (x * x) * (x * z);
+x = (btScalar(1.5) * x) - (x * x) * (x * z);
+return x * y;
 #endif
 #else
 return sqrtf(y);
 #endif
 }
 SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabsf(x); }
 SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cosf(x); }
 SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sinf(x); }
 SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tanf(x); }
-SIMD_FORCE_INLINE btScalar btAcos(btScalar x) {
-if (x<btScalar(-1))
-x=btScalar(-1);
-if (x>btScalar(1))
-x=btScalar(1);
+SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
+{
+if (x < btScalar(-1))
+x = btScalar(-1);
+if (x > btScalar(1))
+x = btScalar(1);
 return acosf(x);
 }
-SIMD_FORCE_INLINE btScalar btAsin(btScalar x) {
-if (x<btScalar(-1))
-x=btScalar(-1);
-if (x>btScalar(1))
-x=btScalar(1);
+SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
+{
+if (x < btScalar(-1))
+x = btScalar(-1);
+if (x > btScalar(1))
+x = btScalar(1);
 return asinf(x);
 }
 SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atanf(x); }
 SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2f(x, y); }
 SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return expf(x); }
 SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return logf(x); }
-SIMD_FORCE_INLINE btScalar btPow(btScalar x,btScalar y) { return powf(x,y); }
-SIMD_FORCE_INLINE btScalar btFmod(btScalar x,btScalar y) { return fmodf(x,y); }
-#endif
+SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return powf(x, y); }
+SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmodf(x, y); }
+#endif//BT_USE_DOUBLE_PRECISION
 #define SIMD_PI btScalar(3.1415926535897932384626433832795029)
 #define SIMD_2_PI (btScalar(2.0) * SIMD_PI)
@@ -480,36 +485,40 @@ SIMD_FORCE_INLINE btScalar btFmod(btScalar x,btScalar y) { return fmodf(x,y); }
 #define SIMD_RADS_PER_DEG (SIMD_2_PI / btScalar(360.0))
 #define SIMD_DEGS_PER_RAD (btScalar(360.0) / SIMD_2_PI)
 #define SIMDSQRT12 btScalar(0.7071067811865475244008443621048490)
-#define btRecipSqrt(x) ((btScalar)(btScalar(1.0)/btSqrt(btScalar(x)))) /* reciprocal square root */
-#define btRecip(x) (btScalar(1.0)/btScalar(x))
+#define btRecipSqrt(x) ((btScalar)(btScalar(1.0) / btSqrt(btScalar(x)))) /* reciprocal square root */
+#define btRecip(x) (btScalar(1.0) / btScalar(x))
 #ifdef BT_USE_DOUBLE_PRECISION
 #define SIMD_EPSILON DBL_EPSILON
 #define SIMD_INFINITY DBL_MAX
 #define BT_ONE 1.0
 #define BT_ZERO 0.0
 #define BT_TWO 2.0
 #define BT_HALF 0.5
 #else
 #define SIMD_EPSILON FLT_EPSILON
 #define SIMD_INFINITY FLT_MAX
 #define BT_ONE 1.0f
 #define BT_ZERO 0.0f
 #define BT_TWO 2.0f
 #define BT_HALF 0.5f
 #endif
+// clang-format on
 SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x)
 {
 btScalar coeff_1 = SIMD_PI / 4.0f;
 btScalar coeff_2 = 3.0f * coeff_1;
 btScalar abs_y = btFabs(y);
 btScalar angle;
-if (x >= 0.0f) {
+if (x >= 0.0f)
+{
 btScalar r = (x - abs_y) / (x + abs_y);
 angle = coeff_1 - coeff_1 * r;
-} else {
+}
+else
+{
 btScalar r = (x + abs_y) / (abs_y - x);
 angle = coeff_2 - coeff_1 * r;
 }
@@ -518,22 +527,28 @@ SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x)
 SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { return btFabs(x) < SIMD_EPSILON; }
-SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps) {
+SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps)
+{
 return (((a) <= eps) && !((a) < -eps));
 }
-SIMD_FORCE_INLINE bool btGreaterEqual (btScalar a, btScalar eps) {
+SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps)
+{
 return (!((a) <= eps));
 }
-SIMD_FORCE_INLINE int btIsNegative(btScalar x) {
+SIMD_FORCE_INLINE int btIsNegative(btScalar x)
+{
 return x < btScalar(0.0) ? 1 : 0;
 }
 SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { return x * SIMD_RADS_PER_DEG; }
 SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { return x * SIMD_DEGS_PER_RAD; }
-#define BT_DECLARE_HANDLE(name) typedef struct name##__ { int unused; } *name
+#define BT_DECLARE_HANDLE(name) \
+typedef struct name##__ \
+{ \
+int unused; \
+} * name
 #ifndef btFsel
 SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c)
@@ -541,21 +556,18 @@ SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c)
 return a >= 0 ? b : c;
 }
 #endif
-#define btFsels(a,b,c) (btScalar)btFsel(a,b,c)
+#define btFsels(a, b, c) (btScalar) btFsel(a, b, c)
 SIMD_FORCE_INLINE bool btMachineIsLittleEndian()
 {
 long int i = 1;
-const char *p = (const char *) &i;
+const char *p = (const char *)&i;
 if (p[0] == 1) // Lowest address contains the least significant byte
 return true;
 else
 return false;
 }
 ///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360
 ///Thanks Phil Knight. See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html
 SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero)
@@ -583,14 +595,14 @@ SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZe
 #endif
 }
-template<typename T> SIMD_FORCE_INLINE void btSwap(T& a, T& b)
+template <typename T>
+SIMD_FORCE_INLINE void btSwap(T &a, T &b)
 {
 T tmp = a;
 a = b;
 b = tmp;
 }
 //PCK: endian swapping functions
 SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val)
 {
@@ -609,7 +621,7 @@ SIMD_FORCE_INLINE unsigned btSwapEndian(int val)
 SIMD_FORCE_INLINE unsigned short btSwapEndian(short val)
 {
-return btSwapEndian((unsigned short) val);
+return btSwapEndian((unsigned short)val);
 }
 ///btSwapFloat uses using char pointers to swap the endianness
@@ -646,9 +658,8 @@ SIMD_FORCE_INLINE float btUnswapEndianFloat(unsigned int a)
 return d;
 }
 // swap using char pointers
-SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst)
+SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char *dst)
 {
 unsigned char *src = (unsigned char *)&d;
@@ -660,7 +671,6 @@ SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst)
 dst[5] = src[2];
 dst[6] = src[1];
 dst[7] = src[0];
 }
 // unswap using char pointers
@@ -681,10 +691,10 @@ SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char *src)
 return d;
 }
-template<typename T>
-SIMD_FORCE_INLINE void btSetZero(T* a, int n)
+template <typename T>
+SIMD_FORCE_INLINE void btSetZero(T *a, int n)
 {
-T* acurr = a;
+T *acurr = a;
 size_t ncurr = n;
 while (ncurr > 0)
 {
@@ -693,16 +703,18 @@ SIMD_FORCE_INLINE void btSetZero(T* a, int n)
 }
 }
 SIMD_FORCE_INLINE btScalar btLargeDot(const btScalar *a, const btScalar *b, int n)
 {
-btScalar p0,q0,m0,p1,q1,m1,sum;
+btScalar p0, q0, m0, p1, q1, m1, sum;
 sum = 0;
 n -= 2;
-while (n >= 0) {
-p0 = a[0]; q0 = b[0];
+while (n >= 0)
+{
+p0 = a[0];
+q0 = b[0];
 m0 = p0 * q0;
-p1 = a[1]; q1 = b[1];
+p1 = a[1];
+q1 = b[1];
 m1 = p1 * q1;
 sum += m0;
 sum += m1;
@@ -711,7 +723,8 @@ SIMD_FORCE_INLINE btScalar btLargeDot(const btScalar *a, const btScalar *b, int
 n -= 2;
 }
 n += 2;
-while (n > 0) {
+while (n > 0)
+{
 sum += (*a) * (*b);
 a++;
 b++;
@@ -720,16 +733,15 @@ SIMD_FORCE_INLINE btScalar btLargeDot(const btScalar *a, const btScalar *b, int
 return sum;
 }
 // returns normalized value in range [-SIMD_PI, SIMD_PI]
 SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians)
 {
 angleInRadians = btFmod(angleInRadians, SIMD_2_PI);
-if(angleInRadians < -SIMD_PI)
+if (angleInRadians < -SIMD_PI)
 {
 return angleInRadians + SIMD_2_PI;
 }
-else if(angleInRadians > SIMD_PI)
+else if (angleInRadians > SIMD_PI)
 {
 return angleInRadians - SIMD_2_PI;
 }
@@ -739,13 +751,11 @@ SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians)
 }
 }
 ///rudimentary class to provide type info
 struct btTypedObject
 {
 btTypedObject(int objectType)
-:m_objectType(objectType)
+: m_objectType(objectType)
 {
 }
 int m_objectType;
@@ -755,29 +765,24 @@ struct btTypedObject
 }
 };
 ///align a pointer to the provided alignment, upwards
-template <typename T>T* btAlignPointer(T* unalignedPtr, size_t alignment)
+template <typename T>
+T *btAlignPointer(T *unalignedPtr, size_t alignment)
 {
 struct btConvertPointerSizeT
 {
-union
-{
-T* ptr;
+union {
+T *ptr;
 size_t integer;
 };
 };
 btConvertPointerSizeT converter;
 const size_t bit_mask = ~(alignment - 1);
 converter.ptr = unalignedPtr;
-converter.integer += alignment-1;
+converter.integer += alignment - 1;
 converter.integer &= bit_mask;
 return converter.ptr;
 }
 #endif //BT_SCALAR_H