Change the lcpp Lua preprocessor to keep #defines and comments, and to remove empty lines.

remove duplicate data in b3Contact4 (now in btContact4Data shared between CPU/C++ and OpenCL)
OpenCL kernels use #include "Bullet3Collision/NarrowPhaseCollision/shared/b3Contact4Data.h"
Increase the number of batches back to 250 (from 50); this hard-coded number still needs a proper fix (see https://github.com/erwincoumans/bullet3/issues/12)
Early work towards GJK/EPA as an addition to the existing SAT/clipping approach.
This commit is contained in:
erwincoumans
2013-08-08 12:24:09 -07:00
parent 46a08e3282
commit 3bf003ace1
50 changed files with 920 additions and 2731 deletions

View File

@@ -2,59 +2,45 @@
static const char* boundSearchKernelsCL= \
"/*\n"
"Copyright (c) 2012 Advanced Micro Devices, Inc. \n"
"\n"
"This software is provided 'as-is', without any express or implied warranty.\n"
"In no event will the authors be held liable for any damages arising from the use of this software.\n"
"Permission is granted to anyone to use this software for any purpose, \n"
"including commercial applications, and to alter it and redistribute it freely, \n"
"subject to the following restrictions:\n"
"\n"
"1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.\n"
"2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.\n"
"3. This notice may not be removed or altered from any source distribution.\n"
"*/\n"
"//Originally written by Takahiro Harada\n"
"\n"
"\n"
"typedef unsigned int u32;\n"
"#define GET_GROUP_IDX get_group_id(0)\n"
"#define GET_LOCAL_IDX get_local_id(0)\n"
"#define GET_GLOBAL_IDX get_global_id(0)\n"
"#define GET_GROUP_SIZE get_local_size(0)\n"
"#define GROUP_LDS_BARRIER barrier(CLK_LOCAL_MEM_FENCE)\n"
"\n"
"typedef struct\n"
"{\n"
" u32 m_key; \n"
" u32 m_value;\n"
"}SortData;\n"
"\n"
"\n"
"\n"
"typedef struct\n"
"{\n"
" u32 m_nSrc;\n"
" u32 m_nDst;\n"
" u32 m_padding[2];\n"
"} ConstBuffer;\n"
"\n"
"\n"
"\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"__kernel\n"
"void SearchSortDataLowerKernel(__global SortData* src, __global u32 *dst, \n"
" unsigned int nSrc, unsigned int nDst)\n"
"{\n"
" int gIdx = GET_GLOBAL_IDX;\n"
"\n"
" if( gIdx < nSrc )\n"
" {\n"
" SortData first; first.m_key = (u32)(-1); first.m_value = (u32)(-1);\n"
" SortData end; end.m_key = nDst; end.m_value = nDst;\n"
"\n"
" SortData iData = (gIdx==0)? first: src[gIdx-1];\n"
" SortData jData = (gIdx==nSrc)? end: src[gIdx];\n"
"\n"
" if( iData.m_key != jData.m_key )\n"
" {\n"
"// for(u32 k=iData.m_key+1; k<=min(jData.m_key, nDst-1); k++)\n"
@@ -65,23 +51,18 @@ static const char* boundSearchKernelsCL= \
" }\n"
" }\n"
"}\n"
"\n"
"\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"__kernel\n"
"void SearchSortDataUpperKernel(__global SortData* src, __global u32 *dst, \n"
" unsigned int nSrc, unsigned int nDst)\n"
"{\n"
" int gIdx = GET_GLOBAL_IDX+1;\n"
"\n"
" if( gIdx < nSrc+1 )\n"
" {\n"
" SortData first; first.m_key = 0; first.m_value = 0;\n"
" SortData end; end.m_key = nDst; end.m_value = nDst;\n"
"\n"
" SortData iData = src[gIdx-1];\n"
" SortData jData = (gIdx==nSrc)? end: src[gIdx];\n"
"\n"
" if( iData.m_key != jData.m_key )\n"
" {\n"
" u32 k = iData.m_key;\n"
@@ -91,7 +72,6 @@ static const char* boundSearchKernelsCL= \
" }\n"
" }\n"
"}\n"
"\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"__kernel\n"
"void SubtractKernel(__global u32* A, __global u32 *B, __global u32 *C, \n"
@@ -99,11 +79,9 @@ static const char* boundSearchKernelsCL= \
"{\n"
" int gIdx = GET_GLOBAL_IDX;\n"
" \n"
"\n"
" if( gIdx < nDst )\n"
" {\n"
" C[gIdx] = A[gIdx] - B[gIdx];\n"
" }\n"
"}\n"
"\n"
;

View File

@@ -2,23 +2,18 @@
static const char* fillKernelsCL= \
"/*\n"
"Copyright (c) 2012 Advanced Micro Devices, Inc. \n"
"\n"
"This software is provided 'as-is', without any express or implied warranty.\n"
"In no event will the authors be held liable for any damages arising from the use of this software.\n"
"Permission is granted to anyone to use this software for any purpose, \n"
"including commercial applications, and to alter it and redistribute it freely, \n"
"subject to the following restrictions:\n"
"\n"
"1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.\n"
"2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.\n"
"3. This notice may not be removed or altered from any source distribution.\n"
"*/\n"
"//Originally written by Takahiro Harada\n"
"\n"
"\n"
"#pragma OPENCL EXTENSION cl_amd_printf : enable\n"
"#pragma OPENCL EXTENSION cl_khr_local_int32_base_atomics : enable\n"
"\n"
"typedef unsigned int u32;\n"
"#define GET_GROUP_IDX get_group_id(0)\n"
"#define GET_LOCAL_IDX get_local_id(0)\n"
@@ -28,11 +23,9 @@ static const char* fillKernelsCL= \
"#define GROUP_MEM_FENCE mem_fence(CLK_LOCAL_MEM_FENCE)\n"
"#define AtomInc(x) atom_inc(&(x))\n"
"#define AtomInc1(x, out) out = atom_inc(&(x))\n"
"\n"
"#define make_uint4 (uint4)\n"
"#define make_uint2 (uint2)\n"
"#define make_int2 (int2)\n"
"\n"
"typedef struct\n"
"{\n"
" union\n"
@@ -45,66 +38,54 @@ static const char* fillKernelsCL= \
" int m_n;\n"
" int m_padding[2];\n"
"} ConstBuffer;\n"
"\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"void FillIntKernel(__global int* dstInt, int num_elements, int value, const int offset)\n"
"{\n"
" int gIdx = GET_GLOBAL_IDX;\n"
"\n"
" if( gIdx < num_elements )\n"
" {\n"
" dstInt[ offset+gIdx ] = value;\n"
" }\n"
"}\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"void FillFloatKernel(__global float* dstFloat, int num_elements, float value, const int offset)\n"
"{\n"
" int gIdx = GET_GLOBAL_IDX;\n"
"\n"
" if( gIdx < num_elements )\n"
" {\n"
" dstFloat[ offset+gIdx ] = value;\n"
" }\n"
"}\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"void FillUnsignedIntKernel(__global unsigned int* dstInt, const int num, const unsigned int value, const int offset)\n"
"{\n"
" int gIdx = GET_GLOBAL_IDX;\n"
"\n"
" if( gIdx < num )\n"
" {\n"
" dstInt[ offset+gIdx ] = value;\n"
" }\n"
"}\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"void FillInt2Kernel(__global int2* dstInt2, const int num, const int2 value, const int offset)\n"
"{\n"
" int gIdx = GET_GLOBAL_IDX;\n"
"\n"
" if( gIdx < num )\n"
" {\n"
" dstInt2[ gIdx + offset] = make_int2( value.x, value.y );\n"
" }\n"
"}\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(64,1,1)))\n"
"void FillInt4Kernel(__global int4* dstInt4, const int num, const int4 value, const int offset)\n"
"{\n"
" int gIdx = GET_GLOBAL_IDX;\n"
"\n"
" if( gIdx < num )\n"
" {\n"
" dstInt4[ offset+gIdx ] = value;\n"
" }\n"
"}\n"
"\n"
;

View File

@@ -2,33 +2,27 @@
static const char* prefixScanKernelsCL= \
"/*\n"
"Copyright (c) 2012 Advanced Micro Devices, Inc. \n"
"\n"
"This software is provided 'as-is', without any express or implied warranty.\n"
"In no event will the authors be held liable for any damages arising from the use of this software.\n"
"Permission is granted to anyone to use this software for any purpose, \n"
"including commercial applications, and to alter it and redistribute it freely, \n"
"subject to the following restrictions:\n"
"\n"
"1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.\n"
"2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.\n"
"3. This notice may not be removed or altered from any source distribution.\n"
"*/\n"
"//Originally written by Takahiro Harada\n"
"\n"
"\n"
"typedef unsigned int u32;\n"
"#define GET_GROUP_IDX get_group_id(0)\n"
"#define GET_LOCAL_IDX get_local_id(0)\n"
"#define GET_GLOBAL_IDX get_global_id(0)\n"
"#define GET_GROUP_SIZE get_local_size(0)\n"
"#define GROUP_LDS_BARRIER barrier(CLK_LOCAL_MEM_FENCE)\n"
"\n"
"// takahiro end\n"
"#define WG_SIZE 128 \n"
"#define m_numElems x\n"
"#define m_numBlocks y\n"
"#define m_numScanBlocks z\n"
"\n"
"/*typedef struct\n"
"{\n"
" uint m_numElems;\n"
@@ -37,7 +31,6 @@ static const char* prefixScanKernelsCL= \
" uint m_padding[1];\n"
"} ConstBuffer;\n"
"*/\n"
"\n"
"u32 ScanExclusive(__local u32* data, u32 n, int lIdx, int lSize)\n"
"{\n"
" u32 blocksum;\n"
@@ -52,17 +45,13 @@ static const char* prefixScanKernelsCL= \
" data[bi] += data[ai];\n"
" }\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" if( lIdx == 0 )\n"
" {\n"
" blocksum = data[ n-1 ];\n"
" data[ n-1 ] = 0;\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" offset >>= 1;\n"
" for(int nActive=1; nActive<n; nActive<<=1, offset>>=1 )\n"
" {\n"
@@ -77,27 +66,20 @@ static const char* prefixScanKernelsCL= \
" }\n"
" }\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" return blocksum;\n"
"}\n"
"\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"__kernel\n"
"void LocalScanKernel(__global u32* dst, __global u32 *src, __global u32 *sumBuffer,\n"
" uint4 cb)\n"
"{\n"
" __local u32 ldsData[WG_SIZE*2];\n"
"\n"
" int gIdx = GET_GLOBAL_IDX;\n"
" int lIdx = GET_LOCAL_IDX;\n"
"\n"
" ldsData[2*lIdx] = ( 2*gIdx < cb.m_numElems )? src[2*gIdx]: 0;\n"
" ldsData[2*lIdx + 1] = ( 2*gIdx+1 < cb.m_numElems )? src[2*gIdx + 1]: 0;\n"
"\n"
" u32 sum = ScanExclusive(ldsData, WG_SIZE*2, GET_LOCAL_IDX, GET_GROUP_SIZE);\n"
"\n"
" if( lIdx == 0 ) sumBuffer[GET_GROUP_IDX] = sum;\n"
"\n"
" if( (2*gIdx) < cb.m_numElems )\n"
" {\n"
" dst[2*gIdx] = ldsData[2*lIdx];\n"
@@ -107,25 +89,20 @@ static const char* prefixScanKernelsCL= \
" dst[2*gIdx + 1] = ldsData[2*lIdx + 1];\n"
" }\n"
"}\n"
"\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"__kernel\n"
"void AddOffsetKernel(__global u32 *dst, __global u32 *blockSum, uint4 cb)\n"
"{\n"
" const u32 blockSize = WG_SIZE*2;\n"
"\n"
" int myIdx = GET_GROUP_IDX+1;\n"
" int lIdx = GET_LOCAL_IDX;\n"
"\n"
" u32 iBlockSum = blockSum[myIdx];\n"
"\n"
" int endValue = min((myIdx+1)*(blockSize), cb.m_numElems);\n"
" for(int i=myIdx*blockSize+lIdx; i<endValue; i+=GET_GROUP_SIZE)\n"
" {\n"
" dst[i] += iBlockSum;\n"
" }\n"
"}\n"
"\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"__kernel\n"
"void TopLevelScanKernel(__global u32* dst, uint4 cb)\n"
@@ -134,21 +111,16 @@ static const char* prefixScanKernelsCL= \
" int gIdx = GET_GLOBAL_IDX;\n"
" int lIdx = GET_LOCAL_IDX;\n"
" int lSize = GET_GROUP_SIZE;\n"
"\n"
" for(int i=lIdx; i<cb.m_numScanBlocks; i+=lSize )\n"
" {\n"
" ldsData[i] = (i<cb.m_numBlocks)? dst[i]:0;\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" u32 sum = ScanExclusive(ldsData, cb.m_numScanBlocks, GET_LOCAL_IDX, GET_GROUP_SIZE);\n"
"\n"
" for(int i=lIdx; i<cb.m_numBlocks; i+=lSize )\n"
" {\n"
" dst[i] = ldsData[i];\n"
" }\n"
"\n"
" if( gIdx == 0 )\n"
" {\n"
" dst[cb.m_numBlocks] = sum;\n"

View File

@@ -2,33 +2,27 @@
static const char* prefixScanKernelsFloat4CL= \
"/*\n"
"Copyright (c) 2012 Advanced Micro Devices, Inc. \n"
"\n"
"This software is provided 'as-is', without any express or implied warranty.\n"
"In no event will the authors be held liable for any damages arising from the use of this software.\n"
"Permission is granted to anyone to use this software for any purpose, \n"
"including commercial applications, and to alter it and redistribute it freely, \n"
"subject to the following restrictions:\n"
"\n"
"1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.\n"
"2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.\n"
"3. This notice may not be removed or altered from any source distribution.\n"
"*/\n"
"//Originally written by Takahiro Harada\n"
"\n"
"\n"
"typedef unsigned int u32;\n"
"#define GET_GROUP_IDX get_group_id(0)\n"
"#define GET_LOCAL_IDX get_local_id(0)\n"
"#define GET_GLOBAL_IDX get_global_id(0)\n"
"#define GET_GROUP_SIZE get_local_size(0)\n"
"#define GROUP_LDS_BARRIER barrier(CLK_LOCAL_MEM_FENCE)\n"
"\n"
"// takahiro end\n"
"#define WG_SIZE 128 \n"
"#define m_numElems x\n"
"#define m_numBlocks y\n"
"#define m_numScanBlocks z\n"
"\n"
"/*typedef struct\n"
"{\n"
" uint m_numElems;\n"
@@ -37,7 +31,6 @@ static const char* prefixScanKernelsFloat4CL= \
" uint m_padding[1];\n"
"} ConstBuffer;\n"
"*/\n"
"\n"
"float4 ScanExclusiveFloat4(__local float4* data, u32 n, int lIdx, int lSize)\n"
"{\n"
" float4 blocksum;\n"
@@ -52,17 +45,13 @@ static const char* prefixScanKernelsFloat4CL= \
" data[bi] += data[ai];\n"
" }\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" if( lIdx == 0 )\n"
" {\n"
" blocksum = data[ n-1 ];\n"
" data[ n-1 ] = 0;\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" offset >>= 1;\n"
" for(int nActive=1; nActive<n; nActive<<=1, offset>>=1 )\n"
" {\n"
@@ -77,27 +66,20 @@ static const char* prefixScanKernelsFloat4CL= \
" }\n"
" }\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" return blocksum;\n"
"}\n"
"\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"__kernel\n"
"void LocalScanKernel(__global float4* dst, __global float4* src, __global float4* sumBuffer, uint4 cb)\n"
"{\n"
" __local float4 ldsData[WG_SIZE*2];\n"
"\n"
" int gIdx = GET_GLOBAL_IDX;\n"
" int lIdx = GET_LOCAL_IDX;\n"
"\n"
" ldsData[2*lIdx] = ( 2*gIdx < cb.m_numElems )? src[2*gIdx]: 0;\n"
" ldsData[2*lIdx + 1] = ( 2*gIdx+1 < cb.m_numElems )? src[2*gIdx + 1]: 0;\n"
"\n"
" float4 sum = ScanExclusiveFloat4(ldsData, WG_SIZE*2, GET_LOCAL_IDX, GET_GROUP_SIZE);\n"
"\n"
" if( lIdx == 0 ) \n"
" sumBuffer[GET_GROUP_IDX] = sum;\n"
"\n"
" if( (2*gIdx) < cb.m_numElems )\n"
" {\n"
" dst[2*gIdx] = ldsData[2*lIdx];\n"
@@ -107,25 +89,20 @@ static const char* prefixScanKernelsFloat4CL= \
" dst[2*gIdx + 1] = ldsData[2*lIdx + 1];\n"
" }\n"
"}\n"
"\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"__kernel\n"
"void AddOffsetKernel(__global float4* dst, __global float4* blockSum, uint4 cb)\n"
"{\n"
" const u32 blockSize = WG_SIZE*2;\n"
"\n"
" int myIdx = GET_GROUP_IDX+1;\n"
" int lIdx = GET_LOCAL_IDX;\n"
"\n"
" float4 iBlockSum = blockSum[myIdx];\n"
"\n"
" int endValue = min((myIdx+1)*(blockSize), cb.m_numElems);\n"
" for(int i=myIdx*blockSize+lIdx; i<endValue; i+=GET_GROUP_SIZE)\n"
" {\n"
" dst[i] += iBlockSum;\n"
" }\n"
"}\n"
"\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"__kernel\n"
"void TopLevelScanKernel(__global float4* dst, uint4 cb)\n"
@@ -134,21 +111,16 @@ static const char* prefixScanKernelsFloat4CL= \
" int gIdx = GET_GLOBAL_IDX;\n"
" int lIdx = GET_LOCAL_IDX;\n"
" int lSize = GET_GROUP_SIZE;\n"
"\n"
" for(int i=lIdx; i<cb.m_numScanBlocks; i+=lSize )\n"
" {\n"
" ldsData[i] = (i<cb.m_numBlocks)? dst[i]:0;\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" float4 sum = ScanExclusiveFloat4(ldsData, cb.m_numScanBlocks, GET_LOCAL_IDX, GET_GROUP_SIZE);\n"
"\n"
" for(int i=lIdx; i<cb.m_numBlocks; i+=lSize )\n"
" {\n"
" dst[i] = ldsData[i];\n"
" }\n"
"\n"
" if( gIdx == 0 )\n"
" {\n"
" dst[cb.m_numBlocks] = sum;\n"

View File

@@ -3,24 +3,19 @@ static const char* radixSort32KernelsCL= \
"/*\n"
"Bullet Continuous Collision Detection and Physics Library\n"
"Copyright (c) 2011 Advanced Micro Devices, Inc. http://bulletphysics.org\n"
"\n"
"This software is provided 'as-is', without any express or implied warranty.\n"
"In no event will the authors be held liable for any damages arising from the use of this software.\n"
"Permission is granted to anyone to use this software for any purpose, \n"
"including commercial applications, and to alter it and redistribute it freely, \n"
"subject to the following restrictions:\n"
"\n"
"1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.\n"
"2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.\n"
"3. This notice may not be removed or altered from any source distribution.\n"
"*/\n"
"//Author Takahiro Harada\n"
"\n"
"\n"
"//#pragma OPENCL EXTENSION cl_amd_printf : enable\n"
"#pragma OPENCL EXTENSION cl_khr_local_int32_base_atomics : enable\n"
"#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable\n"
"\n"
"typedef unsigned int u32;\n"
"#define GET_GROUP_IDX get_group_id(0)\n"
"#define GET_LOCAL_IDX get_local_id(0)\n"
@@ -31,38 +26,27 @@ static const char* radixSort32KernelsCL= \
"#define AtomInc(x) atom_inc(&(x))\n"
"#define AtomInc1(x, out) out = atom_inc(&(x))\n"
"#define AtomAdd(x, value) atom_add(&(x), value)\n"
"\n"
"#define SELECT_UINT4( b, a, condition ) select( b,a,condition )\n"
"\n"
"\n"
"#define make_uint4 (uint4)\n"
"#define make_uint2 (uint2)\n"
"#define make_int2 (int2)\n"
"\n"
"#define WG_SIZE 64\n"
"#define ELEMENTS_PER_WORK_ITEM (256/WG_SIZE)\n"
"#define BITS_PER_PASS 4\n"
"#define NUM_BUCKET (1<<BITS_PER_PASS)\n"
"typedef uchar u8;\n"
"\n"
"// this isn't optimization for VLIW. But just reducing writes. \n"
"#define USE_2LEVEL_REDUCE 1\n"
"\n"
"//#define CHECK_BOUNDARY 1\n"
"\n"
"//#define NV_GPU 1\n"
"\n"
"\n"
"// Cypress\n"
"#define nPerWI 16\n"
"// Cayman\n"
"//#define nPerWI 20\n"
"\n"
"#define m_n x\n"
"#define m_nWGs y\n"
"#define m_startBit z\n"
"#define m_nBlocksPerWG w\n"
"\n"
"/*\n"
"typedef struct\n"
"{\n"
@@ -72,14 +56,11 @@ static const char* radixSort32KernelsCL= \
" int m_nBlocksPerWG;\n"
"} ConstBuffer;\n"
"*/\n"
"\n"
"typedef struct\n"
"{\n"
" unsigned int m_key;\n"
" unsigned int m_value;\n"
"} SortDataCL;\n"
"\n"
"\n"
"uint prefixScanVectorEx( uint4* data )\n"
"{\n"
" u32 sum = 0;\n"
@@ -97,16 +78,13 @@ static const char* radixSort32KernelsCL= \
" sum += tmp;\n"
" return sum;\n"
"}\n"
"\n"
"u32 localPrefixSum( u32 pData, uint lIdx, uint* totalSum, __local u32* sorterSharedMemory, int wgSize /*64 or 128*/ )\n"
"{\n"
" { // Set data\n"
" sorterSharedMemory[lIdx] = 0;\n"
" sorterSharedMemory[lIdx+wgSize] = pData;\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" { // Prefix sum\n"
" int idx = 2*lIdx + (wgSize+1);\n"
"#if defined(USE_2LEVEL_REDUCE)\n"
@@ -118,13 +96,11 @@ static const char* radixSort32KernelsCL= \
" u2 = sorterSharedMemory[idx-1];\n"
" AtomAdd( sorterSharedMemory[idx], u0+u1+u2 ); \n"
" GROUP_MEM_FENCE;\n"
"\n"
" u0 = sorterSharedMemory[idx-12];\n"
" u1 = sorterSharedMemory[idx-8];\n"
" u2 = sorterSharedMemory[idx-4];\n"
" AtomAdd( sorterSharedMemory[idx], u0+u1+u2 ); \n"
" GROUP_MEM_FENCE;\n"
"\n"
" u0 = sorterSharedMemory[idx-48];\n"
" u1 = sorterSharedMemory[idx-32];\n"
" u2 = sorterSharedMemory[idx-16];\n"
@@ -135,7 +111,6 @@ static const char* radixSort32KernelsCL= \
" sorterSharedMemory[idx] += sorterSharedMemory[idx-64];\n"
" GROUP_MEM_FENCE;\n"
" }\n"
"\n"
" sorterSharedMemory[idx-1] += sorterSharedMemory[idx-2];\n"
" GROUP_MEM_FENCE;\n"
" }\n"
@@ -159,20 +134,16 @@ static const char* radixSort32KernelsCL= \
" sorterSharedMemory[idx] += sorterSharedMemory[idx-64];\n"
" GROUP_MEM_FENCE;\n"
" }\n"
"\n"
" sorterSharedMemory[idx-1] += sorterSharedMemory[idx-2];\n"
" GROUP_MEM_FENCE;\n"
" }\n"
"#endif\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" *totalSum = sorterSharedMemory[wgSize*2-1];\n"
" u32 addValue = sorterSharedMemory[lIdx+wgSize-1];\n"
" return addValue;\n"
"}\n"
"\n"
"//__attribute__((reqd_work_group_size(128,1,1)))\n"
"uint4 localPrefixSum128V( uint4 pData, uint lIdx, uint* totalSum, __local u32* sorterSharedMemory )\n"
"{\n"
@@ -180,8 +151,6 @@ static const char* radixSort32KernelsCL= \
" u32 rank = localPrefixSum( s4, lIdx, totalSum, sorterSharedMemory, 128 );\n"
" return pData + make_uint4( rank, rank, rank, rank );\n"
"}\n"
"\n"
"\n"
"//__attribute__((reqd_work_group_size(64,1,1)))\n"
"uint4 localPrefixSum64V( uint4 pData, uint lIdx, uint* totalSum, __local u32* sorterSharedMemory )\n"
"{\n"
@@ -189,28 +158,18 @@ static const char* radixSort32KernelsCL= \
" u32 rank = localPrefixSum( s4, lIdx, totalSum, sorterSharedMemory, 64 );\n"
" return pData + make_uint4( rank, rank, rank, rank );\n"
"}\n"
"\n"
"u32 unpack4Key( u32 key, int keyIdx ){ return (key>>(keyIdx*8)) & 0xff;}\n"
"\n"
"u32 bit8Scan(u32 v)\n"
"{\n"
" return (v<<8) + (v<<16) + (v<<24);\n"
"}\n"
"\n"
"//===\n"
"\n"
"\n"
"\n"
"\n"
"#define MY_HISTOGRAM(idx) localHistogramMat[(idx)*WG_SIZE+lIdx]\n"
"\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"void StreamCountKernel( __global u32* gSrc, __global u32* histogramOut, int4 cb )\n"
"{\n"
" __local u32 localHistogramMat[NUM_BUCKET*WG_SIZE];\n"
"\n"
" u32 gIdx = GET_GLOBAL_IDX;\n"
" u32 lIdx = GET_LOCAL_IDX;\n"
" u32 wgIdx = GET_GROUP_IDX;\n"
@@ -219,21 +178,15 @@ static const char* radixSort32KernelsCL= \
" const int n = cb.m_n;\n"
" const int nWGs = cb.m_nWGs;\n"
" const int nBlocksPerWG = cb.m_nBlocksPerWG;\n"
"\n"
" for(int i=0; i<NUM_BUCKET; i++)\n"
" {\n"
" MY_HISTOGRAM(i) = 0;\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" const int blockSize = ELEMENTS_PER_WORK_ITEM*WG_SIZE;\n"
" u32 localKey;\n"
"\n"
" int nBlocks = (n)/blockSize - nBlocksPerWG*wgIdx;\n"
"\n"
" int addr = blockSize*nBlocksPerWG*wgIdx + ELEMENTS_PER_WORK_ITEM*lIdx;\n"
"\n"
" for(int iblock=0; iblock<min(nBlocksPerWG, nBlocks); iblock++, addr+=blockSize)\n"
" {\n"
" // MY_HISTOGRAM( localKeys.x ) ++ is much expensive than atomic add as it requires read and write while atomics can just add on AMD\n"
@@ -254,7 +207,6 @@ static const char* radixSort32KernelsCL= \
" }\n"
" }\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
" \n"
" if( lIdx < NUM_BUCKET )\n"
@@ -267,13 +219,11 @@ static const char* radixSort32KernelsCL= \
" histogramOut[lIdx*nWGs+wgIdx] = sum;\n"
" }\n"
"}\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"void StreamCountSortDataKernel( __global SortDataCL* gSrc, __global u32* histogramOut, int4 cb )\n"
"{\n"
" __local u32 localHistogramMat[NUM_BUCKET*WG_SIZE];\n"
"\n"
" u32 gIdx = GET_GLOBAL_IDX;\n"
" u32 lIdx = GET_LOCAL_IDX;\n"
" u32 wgIdx = GET_GROUP_IDX;\n"
@@ -282,21 +232,15 @@ static const char* radixSort32KernelsCL= \
" const int n = cb.m_n;\n"
" const int nWGs = cb.m_nWGs;\n"
" const int nBlocksPerWG = cb.m_nBlocksPerWG;\n"
"\n"
" for(int i=0; i<NUM_BUCKET; i++)\n"
" {\n"
" MY_HISTOGRAM(i) = 0;\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" const int blockSize = ELEMENTS_PER_WORK_ITEM*WG_SIZE;\n"
" u32 localKey;\n"
"\n"
" int nBlocks = (n)/blockSize - nBlocksPerWG*wgIdx;\n"
"\n"
" int addr = blockSize*nBlocksPerWG*wgIdx + ELEMENTS_PER_WORK_ITEM*lIdx;\n"
"\n"
" for(int iblock=0; iblock<min(nBlocksPerWG, nBlocks); iblock++, addr+=blockSize)\n"
" {\n"
" // MY_HISTOGRAM( localKeys.x ) ++ is much expensive than atomic add as it requires read and write while atomics can just add on AMD\n"
@@ -317,7 +261,6 @@ static const char* radixSort32KernelsCL= \
" }\n"
" }\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
" \n"
" if( lIdx < NUM_BUCKET )\n"
@@ -330,20 +273,16 @@ static const char* radixSort32KernelsCL= \
" histogramOut[lIdx*nWGs+wgIdx] = sum;\n"
" }\n"
"}\n"
"\n"
"#define nPerLane (nPerWI/4)\n"
"\n"
"// NUM_BUCKET*nWGs < 128*nPerWI\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(128,1,1)))\n"
"void PrefixScanKernel( __global u32* wHistogram1, int4 cb )\n"
"{\n"
" __local u32 ldsTopScanData[128*2];\n"
"\n"
" u32 lIdx = GET_LOCAL_IDX;\n"
" u32 wgIdx = GET_GROUP_IDX;\n"
" const int nWGs = cb.m_nWGs;\n"
"\n"
" u32 data[nPerWI];\n"
" for(int i=0; i<nPerWI; i++)\n"
" {\n"
@@ -351,9 +290,7 @@ static const char* radixSort32KernelsCL= \
" if( (nPerWI*lIdx+i) < NUM_BUCKET*nWGs )\n"
" data[i] = wHistogram1[nPerWI*lIdx+i];\n"
" }\n"
"\n"
" uint4 myData = make_uint4(0,0,0,0);\n"
"\n"
" for(int i=0; i<nPerLane; i++)\n"
" {\n"
" myData.x += data[nPerLane*0+i];\n"
@@ -361,10 +298,8 @@ static const char* radixSort32KernelsCL= \
" myData.z += data[nPerLane*2+i];\n"
" myData.w += data[nPerLane*3+i];\n"
" }\n"
"\n"
" uint totalSum;\n"
" uint4 scanned = localPrefixSum128V( myData, lIdx, &totalSum, ldsTopScanData );\n"
"\n"
"// for(int j=0; j<4; j++) // somehow it introduces a lot of branches\n"
" { int j = 0;\n"
" u32 sum = 0;\n"
@@ -402,7 +337,6 @@ static const char* radixSort32KernelsCL= \
" sum += tmp;\n"
" }\n"
" }\n"
"\n"
" for(int i=0; i<nPerLane; i++)\n"
" {\n"
" data[nPerLane*0+i] += scanned.x;\n"
@@ -410,7 +344,6 @@ static const char* radixSort32KernelsCL= \
" data[nPerLane*2+i] += scanned.z;\n"
" data[nPerLane*3+i] += scanned.w;\n"
" }\n"
"\n"
" for(int i=0; i<nPerWI; i++)\n"
" {\n"
" int index = nPerWI*lIdx+i;\n"
@@ -418,7 +351,6 @@ static const char* radixSort32KernelsCL= \
" wHistogram1[nPerWI*lIdx+i] = data[i];\n"
" }\n"
"}\n"
"\n"
"// 4 scan, 4 exchange\n"
"void sort4Bits(u32 sortData[4], int startBit, int lIdx, __local u32* ldsSortData)\n"
"{\n"
@@ -433,26 +365,20 @@ static const char* radixSort32KernelsCL= \
" uint4 localAddr = make_uint4(lIdx*4+0,lIdx*4+1,lIdx*4+2,lIdx*4+3);\n"
" uint4 dstAddr = localAddr - prefixSum + make_uint4( total, total, total, total );\n"
" dstAddr = SELECT_UINT4( prefixSum, dstAddr, cmpResult != make_uint4(0, 0, 0, 0) );\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" ldsSortData[dstAddr.x] = sortData[0];\n"
" ldsSortData[dstAddr.y] = sortData[1];\n"
" ldsSortData[dstAddr.z] = sortData[2];\n"
" ldsSortData[dstAddr.w] = sortData[3];\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" sortData[0] = ldsSortData[localAddr.x];\n"
" sortData[1] = ldsSortData[localAddr.y];\n"
" sortData[2] = ldsSortData[localAddr.z];\n"
" sortData[3] = ldsSortData[localAddr.w];\n"
"\n"
" GROUP_LDS_BARRIER;\n"
" }\n"
" }\n"
"}\n"
"\n"
"// 2 scan, 2 exchange\n"
"void sort4Bits1(u32 sortData[4], int startBit, int lIdx, __local u32* ldsSortData)\n"
"{\n"
@@ -462,7 +388,6 @@ static const char* radixSort32KernelsCL= \
" (sortData[1]>>(startBit+ibit)) & 0x3, \n"
" (sortData[2]>>(startBit+ibit)) & 0x3, \n"
" (sortData[3]>>(startBit+ibit)) & 0x3);\n"
"\n"
" u32 key4;\n"
" u32 sKeyPacked[4] = { 0, 0, 0, 0 };\n"
" {\n"
@@ -470,22 +395,17 @@ static const char* radixSort32KernelsCL= \
" sKeyPacked[1] |= 1<<(8*b.y);\n"
" sKeyPacked[2] |= 1<<(8*b.z);\n"
" sKeyPacked[3] |= 1<<(8*b.w);\n"
"\n"
" key4 = sKeyPacked[0] + sKeyPacked[1] + sKeyPacked[2] + sKeyPacked[3];\n"
" }\n"
"\n"
" u32 rankPacked;\n"
" u32 sumPacked;\n"
" {\n"
" rankPacked = localPrefixSum( key4, lIdx, &sumPacked, ldsSortData, WG_SIZE );\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" u32 newOffset[4] = { 0,0,0,0 };\n"
" {\n"
" u32 sumScanned = bit8Scan( sumPacked );\n"
"\n"
" u32 scannedKeys[4];\n"
" scannedKeys[0] = 1<<(8*b.x);\n"
" scannedKeys[1] = 1<<(8*b.y);\n"
@@ -500,7 +420,6 @@ static const char* radixSort32KernelsCL= \
" sum4 += tmp;\n"
" }\n"
" }\n"
"\n"
" {\n"
" u32 sumPlusRank = sumScanned + rankPacked;\n"
" { u32 ie = b.x;\n"
@@ -521,31 +440,23 @@ static const char* radixSort32KernelsCL= \
" }\n"
" }\n"
" }\n"
"\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" {\n"
" ldsSortData[newOffset[0]] = sortData[0];\n"
" ldsSortData[newOffset[1]] = sortData[1];\n"
" ldsSortData[newOffset[2]] = sortData[2];\n"
" ldsSortData[newOffset[3]] = sortData[3];\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" u32 dstAddr = 4*lIdx;\n"
" sortData[0] = ldsSortData[dstAddr+0];\n"
" sortData[1] = ldsSortData[dstAddr+1];\n"
" sortData[2] = ldsSortData[dstAddr+2];\n"
" sortData[3] = ldsSortData[dstAddr+3];\n"
"\n"
" GROUP_LDS_BARRIER;\n"
" }\n"
" }\n"
"}\n"
"\n"
"#define SET_HISTOGRAM(setIdx, key) ldsSortData[(setIdx)*NUM_BUCKET+key]\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"void SortAndScatterKernel( __global const u32* restrict gSrc, __global const u32* rHistogram, __global u32* restrict gDst, int4 cb )\n"
@@ -553,34 +464,25 @@ static const char* radixSort32KernelsCL= \
" __local u32 ldsSortData[WG_SIZE*ELEMENTS_PER_WORK_ITEM+16];\n"
" __local u32 localHistogramToCarry[NUM_BUCKET];\n"
" __local u32 localHistogram[NUM_BUCKET*2];\n"
"\n"
" u32 gIdx = GET_GLOBAL_IDX;\n"
" u32 lIdx = GET_LOCAL_IDX;\n"
" u32 wgIdx = GET_GROUP_IDX;\n"
" u32 wgSize = GET_GROUP_SIZE;\n"
"\n"
" const int n = cb.m_n;\n"
" const int nWGs = cb.m_nWGs;\n"
" const int startBit = cb.m_startBit;\n"
" const int nBlocksPerWG = cb.m_nBlocksPerWG;\n"
"\n"
" if( lIdx < (NUM_BUCKET) )\n"
" {\n"
" localHistogramToCarry[lIdx] = rHistogram[lIdx*nWGs + wgIdx];\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" const int blockSize = ELEMENTS_PER_WORK_ITEM*WG_SIZE;\n"
"\n"
" int nBlocks = n/blockSize - nBlocksPerWG*wgIdx;\n"
"\n"
" int addr = blockSize*nBlocksPerWG*wgIdx + ELEMENTS_PER_WORK_ITEM*lIdx;\n"
"\n"
" for(int iblock=0; iblock<min(nBlocksPerWG, nBlocks); iblock++, addr+=blockSize)\n"
" {\n"
" u32 myHistogram = 0;\n"
"\n"
" u32 sortData[ELEMENTS_PER_WORK_ITEM];\n"
" for(int i=0; i<ELEMENTS_PER_WORK_ITEM; i++)\n"
"#if defined(CHECK_BOUNDARY)\n"
@@ -588,13 +490,10 @@ static const char* radixSort32KernelsCL= \
"#else\n"
" sortData[i] = gSrc[ addr+i ];\n"
"#endif\n"
"\n"
" sort4Bits(sortData, startBit, lIdx, ldsSortData);\n"
"\n"
" u32 keys[ELEMENTS_PER_WORK_ITEM];\n"
" for(int i=0; i<ELEMENTS_PER_WORK_ITEM; i++)\n"
" keys[i] = (sortData[i]>>startBit) & 0xf;\n"
"\n"
" { // create histogram\n"
" u32 setIdx = lIdx/16;\n"
" if( lIdx < NUM_BUCKET )\n"
@@ -603,12 +502,10 @@ static const char* radixSort32KernelsCL= \
" }\n"
" ldsSortData[lIdx] = 0;\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" for(int i=0; i<ELEMENTS_PER_WORK_ITEM; i++)\n"
"#if defined(CHECK_BOUNDARY)\n"
" if( addr+i < n )\n"
"#endif\n"
"\n"
"#if defined(NV_GPU)\n"
" SET_HISTOGRAM( setIdx, keys[i] )++;\n"
"#else\n"
@@ -629,13 +526,11 @@ static const char* radixSort32KernelsCL= \
" localHistogram[hIdx] = sum;\n"
" }\n"
" GROUP_LDS_BARRIER;\n"
"\n"
"#if defined(USE_2LEVEL_REDUCE)\n"
" if( lIdx < NUM_BUCKET )\n"
" {\n"
" localHistogram[hIdx] = localHistogram[hIdx-1];\n"
" GROUP_MEM_FENCE;\n"
"\n"
" u32 u0, u1, u2;\n"
" u0 = localHistogram[hIdx-3];\n"
" u1 = localHistogram[hIdx-2];\n"
@@ -665,7 +560,6 @@ static const char* radixSort32KernelsCL= \
"#endif\n"
" GROUP_LDS_BARRIER;\n"
" }\n"
"\n"
" {\n"
" for(int ie=0; ie<ELEMENTS_PER_WORK_ITEM; ie++)\n"
" {\n"
@@ -679,9 +573,7 @@ static const char* radixSort32KernelsCL= \
" gDst[ groupOffset + myIdx ] = sortData[ie];\n"
" }\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" if( lIdx < NUM_BUCKET )\n"
" {\n"
" localHistogramToCarry[lIdx] += myHistogram;\n"
@@ -689,7 +581,6 @@ static const char* radixSort32KernelsCL= \
" GROUP_LDS_BARRIER;\n"
" }\n"
"}\n"
"\n"
"// 2 scan, 2 exchange\n"
"void sort4Bits1KeyValue(u32 sortData[4], int sortVal[4], int startBit, int lIdx, __local u32* ldsSortData, __local int *ldsSortVal)\n"
"{\n"
@@ -699,7 +590,6 @@ static const char* radixSort32KernelsCL= \
" (sortData[1]>>(startBit+ibit)) & 0x3, \n"
" (sortData[2]>>(startBit+ibit)) & 0x3, \n"
" (sortData[3]>>(startBit+ibit)) & 0x3);\n"
"\n"
" u32 key4;\n"
" u32 sKeyPacked[4] = { 0, 0, 0, 0 };\n"
" {\n"
@@ -707,22 +597,17 @@ static const char* radixSort32KernelsCL= \
" sKeyPacked[1] |= 1<<(8*b.y);\n"
" sKeyPacked[2] |= 1<<(8*b.z);\n"
" sKeyPacked[3] |= 1<<(8*b.w);\n"
"\n"
" key4 = sKeyPacked[0] + sKeyPacked[1] + sKeyPacked[2] + sKeyPacked[3];\n"
" }\n"
"\n"
" u32 rankPacked;\n"
" u32 sumPacked;\n"
" {\n"
" rankPacked = localPrefixSum( key4, lIdx, &sumPacked, ldsSortData, WG_SIZE );\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" u32 newOffset[4] = { 0,0,0,0 };\n"
" {\n"
" u32 sumScanned = bit8Scan( sumPacked );\n"
"\n"
" u32 scannedKeys[4];\n"
" scannedKeys[0] = 1<<(8*b.x);\n"
" scannedKeys[1] = 1<<(8*b.y);\n"
@@ -737,7 +622,6 @@ static const char* radixSort32KernelsCL= \
" sum4 += tmp;\n"
" }\n"
" }\n"
"\n"
" {\n"
" u32 sumPlusRank = sumScanned + rankPacked;\n"
" { u32 ie = b.x;\n"
@@ -758,42 +642,30 @@ static const char* radixSort32KernelsCL= \
" }\n"
" }\n"
" }\n"
"\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" {\n"
" ldsSortData[newOffset[0]] = sortData[0];\n"
" ldsSortData[newOffset[1]] = sortData[1];\n"
" ldsSortData[newOffset[2]] = sortData[2];\n"
" ldsSortData[newOffset[3]] = sortData[3];\n"
"\n"
" ldsSortVal[newOffset[0]] = sortVal[0];\n"
" ldsSortVal[newOffset[1]] = sortVal[1];\n"
" ldsSortVal[newOffset[2]] = sortVal[2];\n"
" ldsSortVal[newOffset[3]] = sortVal[3];\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" u32 dstAddr = 4*lIdx;\n"
" sortData[0] = ldsSortData[dstAddr+0];\n"
" sortData[1] = ldsSortData[dstAddr+1];\n"
" sortData[2] = ldsSortData[dstAddr+2];\n"
" sortData[3] = ldsSortData[dstAddr+3];\n"
"\n"
" sortVal[0] = ldsSortVal[dstAddr+0];\n"
" sortVal[1] = ldsSortVal[dstAddr+1];\n"
" sortVal[2] = ldsSortVal[dstAddr+2];\n"
" sortVal[3] = ldsSortVal[dstAddr+3];\n"
"\n"
" GROUP_LDS_BARRIER;\n"
" }\n"
" }\n"
"}\n"
"\n"
"\n"
"\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"void SortAndScatterSortDataKernel( __global const SortDataCL* restrict gSrc, __global const u32* rHistogram, __global SortDataCL* restrict gDst, int4 cb)\n"
@@ -802,39 +674,28 @@ static const char* radixSort32KernelsCL= \
" __local int ldsSortVal[WG_SIZE*ELEMENTS_PER_WORK_ITEM+16];\n"
" __local u32 localHistogramToCarry[NUM_BUCKET];\n"
" __local u32 localHistogram[NUM_BUCKET*2];\n"
"\n"
" u32 gIdx = GET_GLOBAL_IDX;\n"
" u32 lIdx = GET_LOCAL_IDX;\n"
" u32 wgIdx = GET_GROUP_IDX;\n"
" u32 wgSize = GET_GROUP_SIZE;\n"
"\n"
" const int n = cb.m_n;\n"
" const int nWGs = cb.m_nWGs;\n"
" const int startBit = cb.m_startBit;\n"
" const int nBlocksPerWG = cb.m_nBlocksPerWG;\n"
"\n"
" if( lIdx < (NUM_BUCKET) )\n"
" {\n"
" localHistogramToCarry[lIdx] = rHistogram[lIdx*nWGs + wgIdx];\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
" \n"
"\n"
" const int blockSize = ELEMENTS_PER_WORK_ITEM*WG_SIZE;\n"
"\n"
" int nBlocks = n/blockSize - nBlocksPerWG*wgIdx;\n"
"\n"
" int addr = blockSize*nBlocksPerWG*wgIdx + ELEMENTS_PER_WORK_ITEM*lIdx;\n"
"\n"
" for(int iblock=0; iblock<min(nBlocksPerWG, nBlocks); iblock++, addr+=blockSize)\n"
" {\n"
"\n"
" u32 myHistogram = 0;\n"
"\n"
" int sortData[ELEMENTS_PER_WORK_ITEM];\n"
" int sortVal[ELEMENTS_PER_WORK_ITEM];\n"
"\n"
" for(int i=0; i<ELEMENTS_PER_WORK_ITEM; i++)\n"
"#if defined(CHECK_BOUNDARY)\n"
" {\n"
@@ -847,13 +708,10 @@ static const char* radixSort32KernelsCL= \
" sortVal[i] = gSrc[ addr+i ].m_value;\n"
" }\n"
"#endif\n"
"\n"
" sort4Bits1KeyValue(sortData, sortVal, startBit, lIdx, ldsSortData, ldsSortVal);\n"
"\n"
" u32 keys[ELEMENTS_PER_WORK_ITEM];\n"
" for(int i=0; i<ELEMENTS_PER_WORK_ITEM; i++)\n"
" keys[i] = (sortData[i]>>startBit) & 0xf;\n"
"\n"
" { // create histogram\n"
" u32 setIdx = lIdx/16;\n"
" if( lIdx < NUM_BUCKET )\n"
@@ -862,12 +720,10 @@ static const char* radixSort32KernelsCL= \
" }\n"
" ldsSortData[lIdx] = 0;\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" for(int i=0; i<ELEMENTS_PER_WORK_ITEM; i++)\n"
"#if defined(CHECK_BOUNDARY)\n"
" if( addr+i < n )\n"
"#endif\n"
"\n"
"#if defined(NV_GPU)\n"
" SET_HISTOGRAM( setIdx, keys[i] )++;\n"
"#else\n"
@@ -888,13 +744,11 @@ static const char* radixSort32KernelsCL= \
" localHistogram[hIdx] = sum;\n"
" }\n"
" GROUP_LDS_BARRIER;\n"
"\n"
"#if defined(USE_2LEVEL_REDUCE)\n"
" if( lIdx < NUM_BUCKET )\n"
" {\n"
" localHistogram[hIdx] = localHistogram[hIdx-1];\n"
" GROUP_MEM_FENCE;\n"
"\n"
" u32 u0, u1, u2;\n"
" u0 = localHistogram[hIdx-3];\n"
" u1 = localHistogram[hIdx-2];\n"
@@ -924,7 +778,6 @@ static const char* radixSort32KernelsCL= \
"#endif\n"
" GROUP_LDS_BARRIER;\n"
" }\n"
"\n"
" {\n"
" for(int ie=0; ie<ELEMENTS_PER_WORK_ITEM; ie++)\n"
" {\n"
@@ -958,9 +811,7 @@ static const char* radixSort32KernelsCL= \
"#endif\n"
" }\n"
" }\n"
"\n"
" GROUP_LDS_BARRIER;\n"
"\n"
" if( lIdx < NUM_BUCKET )\n"
" {\n"
" localHistogramToCarry[lIdx] += myHistogram;\n"
@@ -968,13 +819,6 @@ static const char* radixSort32KernelsCL= \
" GROUP_LDS_BARRIER;\n"
" }\n"
"}\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"void SortAndScatterSortDataKernelSerial( __global const SortDataCL* restrict gSrc, __global const u32* rHistogram, __global SortDataCL* restrict gDst, int4 cb)\n"
@@ -988,7 +832,6 @@ static const char* radixSort32KernelsCL= \
" const int n = cb.m_n;\n"
" const int nWGs = cb.m_nWGs;\n"
" const int nBlocksPerWG = cb.m_nBlocksPerWG;\n"
"\n"
" int counter[NUM_BUCKET];\n"
" \n"
" if (realLocalIdx>0)\n"
@@ -996,11 +839,9 @@ static const char* radixSort32KernelsCL= \
" \n"
" for (int c=0;c<NUM_BUCKET;c++)\n"
" counter[c]=0;\n"
"\n"
" const int blockSize = ELEMENTS_PER_WORK_ITEM*WG_SIZE;\n"
" \n"
" int nBlocks = (n)/blockSize - nBlocksPerWG*wgIdx;\n"
"\n"
" for(int iblock=0; iblock<min(nBlocksPerWG, nBlocks); iblock++)\n"
" {\n"
" for (int lIdx=0;lIdx<WG_SIZE;lIdx++)\n"
@@ -1022,8 +863,6 @@ static const char* radixSort32KernelsCL= \
" }\n"
" \n"
"}\n"
"\n"
"\n"
"__kernel\n"
"__attribute__((reqd_work_group_size(WG_SIZE,1,1)))\n"
"void SortAndScatterKernelSerial( __global const u32* restrict gSrc, __global const u32* rHistogram, __global u32* restrict gDst, int4 cb )\n"
@@ -1037,7 +876,6 @@ static const char* radixSort32KernelsCL= \
" const int n = cb.m_n;\n"
" const int nWGs = cb.m_nWGs;\n"
" const int nBlocksPerWG = cb.m_nBlocksPerWG;\n"
"\n"
" int counter[NUM_BUCKET];\n"
" \n"
" if (realLocalIdx>0)\n"
@@ -1045,11 +883,9 @@ static const char* radixSort32KernelsCL= \
" \n"
" for (int c=0;c<NUM_BUCKET;c++)\n"
" counter[c]=0;\n"
"\n"
" const int blockSize = ELEMENTS_PER_WORK_ITEM*WG_SIZE;\n"
" \n"
" int nBlocks = (n)/blockSize - nBlocksPerWG*wgIdx;\n"
"\n"
" for(int iblock=0; iblock<min(nBlocksPerWG, nBlocks); iblock++)\n"
" {\n"
" for (int lIdx=0;lIdx<WG_SIZE;lIdx++)\n"