hip_filename (5–84 chars) | hip_content (79–9.69M chars) | cuda_filename (4–83 chars) | cuda_content (19–9.69M chars) |
---|---|---|---|
6d13723bfe302c03921d9fc9e3bfbe6a18365285.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeMinEnergyMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *energy = NULL;
hipMalloc(&energy, XSIZE*YSIZE);
float *min_energy = NULL;
hipMalloc(&min_energy, XSIZE*YSIZE);
int height = YSIZE;
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((computeMinEnergyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, energy,min_energy,height,width);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((computeMinEnergyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, energy,min_energy,height,width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((computeMinEnergyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, energy,min_energy,height,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6d13723bfe302c03921d9fc9e3bfbe6a18365285.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeMinEnergyMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *energy = NULL;
cudaMalloc(&energy, XSIZE*YSIZE);
float *min_energy = NULL;
cudaMalloc(&min_energy, XSIZE*YSIZE);
int height = YSIZE;
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeMinEnergyMatrix<<<gridBlock,threadBlock>>>(energy,min_energy,height,width);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeMinEnergyMatrix<<<gridBlock,threadBlock>>>(energy,min_energy,height,width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeMinEnergyMatrix<<<gridBlock,threadBlock>>>(energy,min_energy,height,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bf53f16abf1d47b345cb55245a94b2049eaa255d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
int selectedDevice;
const int N = 21;
int numLabels;
uint numVars;
__device__ __constant__ int dev_numRules;
__device__ __constant__ int dev_rules[N * 3];
__device__ __constant__ int dev_numLabels;
__device__ __constant__ uint dev_heapSize;
__device__ __constant__ uint *dev_elementPool;
// index of the next free element in the corresponding free list
// in words
__device__ uint freeList1 = 0;
__device__ uint freeList2 = 0;
__device__ uint worklistIndex = 0;
__device__ uint counter = 0;
__device__ bool dev_r = false;
__device__ bool dev_changed = false;
void transferRules(int numRules, int *rules)
{
if (numRules > N) {
std::cerr << "N needs to be reset." << std::endl;
exit(-1);
}
cudaSafeCall(hipMemcpyToSymbol(dev_numRules, &numRules, sizeof(int)));
cudaSafeCall(hipMemcpyToSymbol(dev_rules, rules, numRules * sizeof(int) * 3));
cudaSafeCall(hipMemcpyToSymbol(dev_numLabels, &numLabels, sizeof(int)));
}
void allocateElementPool(uint &heapSize, uint* &elementPool)
{
heapSize = (DEVMEMUSED_MB / 8 * 7) * 1024 * 256;
std::cout << "HEAP SIZE: " << heapSize << " (in 32-bit words)" << std::endl;
cudaSafeCall(hipMalloc((void **)&elementPool, heapSize * sizeof(uint)));
cudaSafeCall(hipMemcpyToSymbol(dev_heapSize, &heapSize, sizeof(uint)));
cudaSafeCall(hipMemcpyToSymbol(dev_elementPool, &elementPool, sizeof(uint*)));
}
void transferElements(Partition p, uint *elements, int start, uint heapSize, uint *elementPool)
{
uint poolSize = p.oldSize + p.deltaSize + p.tmpSize;
if (start) {
cudaSafeCall(hipMemcpyToSymbol(freeList2, &poolSize, sizeof(uint)));
} else {
cudaSafeCall(hipMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
}
cudaSafeCall(hipMemcpy(elementPool + (heapSize / 2) * start, elements, poolSize * sizeof(uint), H2D));
delete[] elements;
}
void initialize(uint headSize, int start, uint offset, uint heapSize, uint *elementPool)
{
uint poolSize = offset + headSize;
if (start) {
cudaSafeCall(hipMemcpyToSymbol(freeList2, &poolSize, sizeof(uint)));
} else {
cudaSafeCall(hipMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
}
cudaSafeCall(hipMemset(elementPool + (heapSize / 2) * start + offset, -1, headSize * sizeof(uint)));
}
void needRepartition(Partition &p, uint heapSize, bool &r)
{
bool s = false;
cudaSafeCall(hipMemcpyFromSymbol(&s, dev_r, sizeof(bool)));
if (s) {
p.tmpSize = heapSize / 2 - p.deltaSize - p.oldSize;
s = false;
cudaSafeCall(hipMemcpyToSymbol(dev_r, &s, sizeof(bool)));
r = true;
std::cout << "Need Repartition." << std::endl;
}
}
__host__ inline uint getBlocks()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, selectedDevice);
return deviceProp.multiProcessorCount * 4;
}
__device__ inline uint getThreadIdInBlock()
{
return threadIdx.x + threadIdx.y * blockDim.x;
}
__device__ inline uint isFirstThreadOfBlock()
{
return !getThreadIdInBlock();
}
__device__ inline uint isFirstThreadOfVWarp()
{
return !threadIdx.x;
}
__device__ inline void graphSet(const uint pos, const uint val)
{
dev_elementPool[pos] = val;
}
__device__ inline uint graphGet(const uint pos)
{
return dev_elementPool[pos];
}
__device__ inline uint getAndIncrement(const uint delta)
{
__shared__ volatile uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (isFirstThreadOfVWarp())
temp[threadIdx.y] = atomicAdd(&worklistIndex, delta);
return temp[threadIdx.y];
}
__device__ inline void resetWorklistIndex()
{
__syncthreads();
if (isFirstThreadOfBlock() && atomicInc(&counter, gridDim.x - 1) == (gridDim.x - 1))
worklistIndex = 0;
}
__device__ uint getValAtThread(const uint myVal, const uint i)
{
__shared__ volatile uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (threadIdx.x == i)
temp[threadIdx.y] = myVal;
return temp[threadIdx.y];
}
__device__ inline uint mallocIn(int start, uint size = ELEMENT_WIDTH)
{
__shared__ volatile uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (isFirstThreadOfVWarp()) {
if (start) {
temp[threadIdx.y] = atomicAdd(&freeList2, size);
} else {
temp[threadIdx.y] = atomicAdd(&freeList1, size);
}
}
if (temp[threadIdx.y] + size > dev_heapSize / 2) {
dev_r = true;
return -1;
} else {
return temp[threadIdx.y];
}
}
__device__ inline uint getIndex(uint headIndex, int start)
{
uint index = graphGet(headIndex);
if (index == NIL) {
uint newIndex = mallocIn(start);
if (newIndex != -1) {
graphSet((dev_heapSize / 2) * start + newIndex + threadIdx.x, NIL);
graphSet(headIndex, newIndex);
}
return newIndex;
}
return index;
}
__device__ uint addElement(uint index, uint fromBase, uint fromBits, int start)
{
uint startIndex = (dev_heapSize / 2) * start;
for (;;) {
uint toBits = graphGet(index + threadIdx.x);
uint toBase = getValAtThread(toBits, BASE);
if (toBase == NIL) {
// can only happen if the list is empty
graphSet(index + threadIdx.x, fromBits);
return index;
}
if (toBase == fromBase) {
uint orBits = toBits | fromBits;
if (orBits != toBits && threadIdx.x < NEXT)
graphSet(index + threadIdx.x, orBits);
return index;
}
if (toBase < fromBase) {
uint toNext = getValAtThread(toBits, NEXT);
if (toNext == NIL) {
// appending
uint newIndex = mallocIn(start);
if (newIndex == -1) return -1;
graphSet(newIndex + startIndex + threadIdx.x, fromBits);
graphSet(index + NEXT, newIndex);
return newIndex + startIndex;
}
index = toNext + startIndex;
} else {
uint newIndex = mallocIn(start);
if (newIndex == -1) return -1;
graphSet(newIndex + startIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
graphSet(index + threadIdx.x, val);
return index;
}
}
}
__device__ uint insert(uint index, uint var, int start)
{
uint base = BASE_OF(var);
uint unit = UNIT_OF(var);
uint bit = BIT_OF(var);
uint myBits = 0;
if (threadIdx.x == unit) myBits = 1 << bit;
if (threadIdx.x == BASE) myBits = base;
if (threadIdx.x == NEXT) myBits = NIL;
return addElement(index, base, myBits, start);
}
__device__ uint clone(uint nextIndex, int toStart, uint fromBits, uint fromNext, uint fromStartIndex)
{
uint toStartIndex = (dev_heapSize / 2) * toStart;
for (;;) {
uint newIndex = mallocIn(toStart);
if (newIndex == -1) return -1;
dev_changed = true;
uint val = threadIdx.x == NEXT ? NIL : fromBits;
graphSet(newIndex + toStartIndex + threadIdx.x, val);
graphSet(nextIndex, newIndex);
if (fromNext == NIL) break;
fromBits = graphGet(fromNext + fromStartIndex + threadIdx.x);
fromNext = getValAtThread(fromBits, NEXT);
nextIndex = newIndex + toStartIndex + NEXT;
}
return 0;
}
__device__ uint union2(uint to, uint toRel, ComputeRegion tmp, uint fromIndex, int fromStart)
{
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
uint toStartIndex = (dev_heapSize / 2) * tmp.start;
uint fromBits = graphGet(fromIndex + threadIdx.x);
uint fromBase = getValAtThread(fromBits, BASE);
uint fromNext = getValAtThread(fromBits, NEXT);
uint toHeadIndex = toStartIndex + tmp.offset + roundToNextMultipleOf(tmp.lastVar - tmp.firstVar + 1) * (toRel - 1) + to;
uint toIndex = graphGet(toHeadIndex);
if (toIndex == NIL) {
uint s = clone(toHeadIndex, tmp.start, fromBits, fromNext, fromStartIndex);
if (s == -1) return -1;
return 0;
}
toIndex += toStartIndex;
uint toBits = graphGet(toIndex + threadIdx.x);
uint toBase = getValAtThread(toBits, BASE);
uint toNext = getValAtThread(toBits, NEXT);
for (;;) {
if (toBase > fromBase) {
uint newIndex = mallocIn(tmp.start);
if (newIndex == -1) return -1;
dev_changed = true;
graphSet(newIndex + toStartIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
graphSet(toIndex + threadIdx.x, val);
if (fromNext == NIL) return 0;
toIndex = newIndex + toStartIndex;
fromBits = graphGet(fromNext + fromStartIndex + threadIdx.x);
fromBase = getValAtThread(fromBits, BASE);
fromNext = getValAtThread(fromBits, NEXT);
} else if (toBase == fromBase) {
uint orBits = fromBits | toBits;
uint newBits = threadIdx.x == NEXT ? toNext : orBits;
if (newBits != toBits) dev_changed = true;
graphSet(toIndex + threadIdx.x, newBits);
if (fromNext == NIL) return 0;
fromBits = graphGet(fromNext + fromStartIndex + threadIdx.x);
fromBase = getValAtThread(fromBits, BASE);
fromNext = getValAtThread(fromBits, NEXT);
if (toNext == NIL) {
uint s = clone(toIndex + NEXT, tmp.start, fromBits, fromNext, fromStartIndex);
if (s == -1) return -1;
return 0;
}
toIndex = toNext + toStartIndex;
toBits = graphGet(toIndex + threadIdx.x);
toBase = getValAtThread(toBits, BASE);
toNext = getValAtThread(toBits, NEXT);
} else {
if (toNext == NIL) {
uint s = clone(toIndex + NEXT, tmp.start, fromBits, fromNext, fromStartIndex);
if (s == -1) return -1;
return 0;
}
toIndex = toNext + toStartIndex;
toBits = graphGet(toIndex + threadIdx.x);
toBase = getValAtThread(toBits, BASE);
toNext = getValAtThread(toBits, NEXT);
}
}
}
__device__ uint unionAll(uint toRel, uint fromRel, uint to, uint numFroms, uint *p, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
uint startIndex_dst1 = (dev_heapSize / 2) * dst1.start;
uint virtualNumPartialVars_dst1 = roundToNextMultipleOf(dst1.lastVar - dst1.firstVar + 1);
uint startIndex_dst2 = (dev_heapSize / 2) * dst2.start;
uint virtualNumPartialVars_dst2 = roundToNextMultipleOf(dst2.lastVar - dst2.firstVar + 1);
for (uint i = 0; i < numFroms; i++) {
if (p[i] >= dst1.firstVar && p[i] <= dst1.lastVar) {
uint headIndex1 = startIndex_dst1 + dst1.offset + virtualNumPartialVars_dst1 * (fromRel - 1) + p[i] - dst1.firstVar;
uint fromIndex1 = graphGet(headIndex1);
if (fromIndex1 != NIL) {
uint s = union2(to, toRel, tmp, fromIndex1 + startIndex_dst1, dst1.start);
if (s == -1) return -1;
}
}
if (dst2.flag) {
if (p[i] >= dst2.firstVar && p[i] <= dst2.lastVar) {
uint headIndex2 = startIndex_dst2 + dst2.offset + virtualNumPartialVars_dst2 * (fromRel - 1) + p[i] - dst2.firstVar;
uint fromIndex2 = graphGet(headIndex2);
if (fromIndex2 != NIL) {
uint s = union2(to, toRel, tmp, fromIndex2 + startIndex_dst2, dst2.start);
if (s == -1) return -1;
}
}
}
}
return 0;
}
__device__ uint decode(uint toRel, uint fromRel, uint myBits, uint base, uint i, uint *p, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
for (int j = 0; j < BASE; j++) {
uint bits = getValAtThread(myBits, j);
if (bits) {
uint numOnes = __popc(bits);
for (int k = 0; k < 32 / blockDim.x; k++) {
uint threadId = threadIdx.x + blockDim.x * k;
uint threadMask = 1 << threadId;
uint myMask = threadMask - 1;
uint var = base * ELEMENT_CARDINALITY + mul32(j) + threadId;
uint bitActive = bits & threadMask;
uint pos = __popc(bits & myMask);
if (bitActive) p[pos] = var;
}
uint s = unionAll(toRel, fromRel, i, numOnes, p, dst1, dst2, tmp);
if (s == -1) return -1;
}
}
return 0;
}
__device__ uint apply(uint firstRel, uint secondRel, uint thirdRel, uint i, uint *p, ComputeRegion src, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
uint startIndex = (dev_heapSize / 2) * src.start;
uint headIndex = startIndex + src.offset + roundToNextMultipleOf(src.lastVar - src.firstVar + 1) * (firstRel - 1) + i;
uint index = graphGet(headIndex);
while (index != NIL) {
index += startIndex;
uint myBits = graphGet(index + threadIdx.x);
uint base = getValAtThread(myBits, BASE);
uint s = decode(thirdRel, secondRel, myBits, base, i, p, dst1, dst2, tmp);
if (s == -1) return -1;
index = getValAtThread(myBits, NEXT);
}
return 0;
}
/*
__global__ void addEdges(uint* keys, uint* valIndex, const uint numKeys, uint* val1, uint* val2, uint firstVar, uint lastVar, int start, uint offset) {
__shared__ uint temp[THREADS_PER_BLOCK / WARP_SIZE * 64];
uint* p = &temp[threadIdx.y * 64];
uint startIndex = (dev_heapSize / 2) * start;
uint virtualNumVars = roundToNextMultipleOf(lastVar - firstVar + 1);
uint i = getAndIncrement(1);
while (i < numKeys) {
uint src = keys[i];
uint begin = valIndex[i];
uint end = valIndex[i + 1];
uint virtualBegin = roundToPrevMultipleOf(begin); // to ensure alignment
for (int j = virtualBegin; j < end; j += WARP_SIZE) {
uint myIndex = j + threadIdx.x;
p[threadIdx.x] = myIndex < end ? val1[myIndex] : NIL;
p[threadIdx.x + 32] = myIndex < end ? val2[myIndex] : NIL;
uint beginK = max((int)begin - j, 0);
uint endK = min(end - j, WARP_SIZE);
for (int k = beginK; k < endK; k++) {
uint dst = p[k];
uint rel = p[k + 32];
uint headIndex = startIndex + offset + virtualNumVars * (rel - 1) + src - firstVar;
uint index = getIndex(headIndex, start);
if (index == -1) {
break;
}
uint s = insert(index + startIndex, dst, start);
if (s == -1) {
break;
}
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
*/
// for complete rules
__global__ void compute11(ComputeRegion src, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
__shared__ uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH * 32];
uint *p = &temp[threadIdx.y * 32];
uint numPartialVars = src.lastVar - src.firstVar + 1;
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numRules; j++) {
if (dev_rules[j * 3 + 1] != 0 && dev_rules[j * 3 + 2] != 0) {
uint s = apply(dev_rules[j * 3 + 1], dev_rules[j * 3 + 2], dev_rules[j * 3], i, p, src, dst1, dst2, tmp);
if (s == -1) break;
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
// for rules which have two labels
__global__ void compute10(uint firstVar, uint lastVar, uint fromOffset, int start, ComputeRegion tmp)
{
uint startIndex = (dev_heapSize / 2) * start;
uint numPartialVars = lastVar - firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numRules; j++) {
if (dev_rules[j * 3 + 1] != 0 && dev_rules[j * 3 + 2] == 0) {
uint fromHeadIndex = startIndex + fromOffset + virtualNumPartialVars * (dev_rules[j * 3 + 1] - 1) + i;
uint fromIndex = graphGet(fromHeadIndex);
if (fromIndex != NIL) {
uint s = union2(i, dev_rules[j * 3], tmp, fromIndex + startIndex, start);
if (s == -1) break;
}
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
// for rules which have only one label
__global__ void compute00(uint firstVar, uint lastVar, uint tmpOffset, int start)
{
uint startIndex = (dev_heapSize / 2) * start;
uint numPartialVars = lastVar - firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numRules; j++) {
if (dev_rules[j * 3 + 1] == 0) {
uint headIndex = startIndex + tmpOffset + virtualNumPartialVars * (dev_rules[j * 3] - 1) + i;
uint index = getIndex(headIndex, start);
if (index == -1) break;
uint s = insert(index + startIndex, firstVar + i, start);
if (s == -1) break;
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
void spagpu_s(Partition &p, int start, uint heapSize, uint *elementPool, bool &r, uint filter)
{
std::cout << "Self-matching..." << std::flush;
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
if (p.tmpSize == 0) {
uint numPartialVars = p.lastVar - p.firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint headSize = virtualNumPartialVars * numLabels;
uint offset = p.oldSize + p.deltaSize;
if (offset + headSize > heapSize / 2) { // if the size of the partition exceeds the limit, return and repart
r = true;
std::cout << "Need Repartition." << std::endl;
return;
}
initialize(headSize, start, offset, heapSize, elementPool);
}
ComputeRegion empty(0, 0, 0, 0, false);
ComputeRegion tmp_s(p.firstVar, p.lastVar, start, p.oldSize + p.deltaSize, true);
if (p.oldSize == 0) {
hipLaunchKernelGGL(( compute00), dim3(blocks), dim3(threads), 0, 0, p.firstVar, p.lastVar, p.oldSize + p.deltaSize, start);
needRepartition(p, heapSize, r);
if (r) return;
}
if(filter == 0){
hipLaunchKernelGGL(( compute10), dim3(blocks), dim3(threads), 0, 0, p.firstVar, p.lastVar, p.oldSize, start, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
}
ComputeRegion new_s(p.firstVar, p.lastVar, start, p.oldSize, true);
if (p.oldSize != 0) {
ComputeRegion old_s(p.firstVar, p.lastVar, start, 0, true);
if(filter == 0){
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, old_s, new_s, empty, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
}
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new_s, old_s, new_s, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
} else {
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new_s, new_s, empty, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
}
uint poolSize;
if (start) {
cudaSafeCall(hipMemcpyFromSymbol(&poolSize, freeList2, sizeof(uint)));
} else {
cudaSafeCall(hipMemcpyFromSymbol(&poolSize, freeList1, sizeof(uint)));
}
p.tmpSize = poolSize - p.deltaSize - p.oldSize;
std::cout << "OK." << std::endl;
}
void spagpu_b(Partition &p1, Partition &p2, bool &r1, bool &r2, uint heapSize, uint *elementPool, uint filter)
{
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
if (p1.tmpSize == 0) {
uint numPartialVars1 = p1.lastVar - p1.firstVar + 1;
uint virtualNumPartialVars1 = roundToNextMultipleOf(numPartialVars1);
uint headSize1 = virtualNumPartialVars1 * numLabels;
uint offset1 = p1.oldSize + p1.deltaSize;
if (offset1 + headSize1 > heapSize / 2) {
r1 = true;
std::cout << "Need Repartition." << std::endl;
return;
}
initialize(headSize1, 0, offset1, heapSize, elementPool);
}
if (p2.tmpSize == 0) {
uint numPartialVars2 = p2.lastVar - p2.firstVar + 1;
uint virtualNumPartialVars2 = roundToNextMultipleOf(numPartialVars2);
uint headSize2 = virtualNumPartialVars2 * numLabels;
uint offset2 = p2.oldSize + p2.deltaSize;
if (offset2 + headSize2 > heapSize / 2) {
r2 = true;
std::cout << "Need Repartition." << std::endl;
return;
}
initialize(headSize2, 1, offset2, heapSize, elementPool);
}
ComputeRegion empty(0, 0, 0, 0, false);
ComputeRegion tmp1(p1.firstVar, p1.lastVar, 0, p1.oldSize + p1.deltaSize, true);
ComputeRegion tmp2(p2.firstVar, p2.lastVar, 1, p2.oldSize + p2.deltaSize, true);
std::cout << "## ITERATION 0 ##" << std::endl;
if (p1.oldSize != 0 && p2.deltaSize != 0) {
if(filter == 0){
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, old1, new2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
if (p2.oldSize != 0 && p2.deltaSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new1, old2, new2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
else {
if (p2.oldSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new1, old2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new1, new2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
}
uint poolSize1;
cudaSafeCall(hipMemcpyFromSymbol(&poolSize1, freeList1, sizeof(uint)));
p1.tmpSize = poolSize1 - p1.deltaSize - p1.oldSize;
if (p2.oldSize != 0 && p1.deltaSize != 0) {
if(filter == 0){
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, old2, new1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
if (p1.oldSize != 0 && p1.deltaSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new2, old1, new1, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
else {
if (p1.oldSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new2, old1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new2, new1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
}
uint poolSize2;
cudaSafeCall(hipMemcpyFromSymbol(&poolSize2, freeList2, sizeof(uint)));
p2.tmpSize = poolSize2 - p2.deltaSize - p2.oldSize;
}
void spagpu(Partition &p1, Partition &p2, bool &r1, bool &r2, uint heapSize, uint filter)
{
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
ComputeRegion empty(0, 0, 0, 0, false);
ComputeRegion tmp1(p1.firstVar, p1.lastVar, 0, p1.oldSize + p1.deltaSize, true);
ComputeRegion tmp2(p2.firstVar, p2.lastVar, 1, p2.oldSize + p2.deltaSize, true);
// repeat until a fixed point is reached
int iterNo = 0;
for (;;) {
std::cout << "## ITERATION " << ++iterNo << " ##" << std::endl;
bool changed = false;
cudaSafeCall(hipMemcpyToSymbol(dev_changed, &changed, sizeof(bool)));
if (p1.oldSize != 0) {
if(filter == 0){
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, old1, tmp1, tmp2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new1, tmp1, tmp2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p1.oldSize != 0 && p1.deltaSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp1, old1, new1, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
} else {
if (p1.oldSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp1, old1, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp1, new1, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if (p2.oldSize != 0 && p2.deltaSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp1, old2, new2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
} else {
if (p2.oldSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp1, old2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp1, new2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if(filter == 0){
hipLaunchKernelGGL(( compute10), dim3(blocks), dim3(threads), 0, 0, p1.firstVar, p1.lastVar, p1.oldSize + p1.deltaSize, 0, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp1, tmp1, tmp2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
uint poolSize1;
cudaSafeCall(hipMemcpyFromSymbol(&poolSize1, freeList1, sizeof(uint)));
p1.tmpSize = poolSize1 - p1.deltaSize - p1.oldSize;
if (p2.oldSize != 0) {
if(filter == 0){
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, old2, tmp1, tmp2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, new2, tmp1, tmp2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p2.oldSize != 0 && p2.deltaSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp2, old2, new2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
} else {
if (p2.oldSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp2, old2, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp2, new2, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if (p1.oldSize != 0 && p1.deltaSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp2, old1, new1, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
} else {
if (p1.oldSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp2, old1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp2, new1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if(filter == 0){
hipLaunchKernelGGL(( compute10), dim3(blocks), dim3(threads), 0, 0, p2.firstVar, p2.lastVar, p2.oldSize + p2.deltaSize, 1, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
hipLaunchKernelGGL(( compute11), dim3(blocks), dim3(threads), 0, 0, tmp2, tmp1, tmp2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
uint poolSize2;
cudaSafeCall(hipMemcpyFromSymbol(&poolSize2, freeList2, sizeof(uint)));
p2.tmpSize = poolSize2 - p2.deltaSize - p2.oldSize;
cudaSafeCall(hipMemcpyFromSymbol(&changed, dev_changed, sizeof(bool)));
if (changed == false) break;
}
}
__device__ void computeDegreePerLabel(uint *degree_elements, uint *degree_edges, uint i, uint index, uint startIndex, uint *p)
{
do {
if (isFirstThreadOfVWarp()) degree_elements[i]++;
index += startIndex;
uint myBits = graphGet(index + threadIdx.x);
p[threadIdx.x] = threadIdx.x < BASE ? __popc(myBits) : 0;
int k = blockDim.x / 2;
while (k) {
if (threadIdx.x < k) p[threadIdx.x] += p[threadIdx.x + k];
k /= 2;
}
if (isFirstThreadOfVWarp()) degree_edges[i] += p[0];
index = getValAtThread(myBits, NEXT);
} while (index != NIL);
}
__global__ void computeDegree(uint *degree_elements, uint *degree_edges, uint numPartialVars, int start, uint offset)
{
__shared__ uint temp[THREADS_PER_BLOCK];
uint *p = &temp[threadIdx.y * blockDim.x];
uint startIndex = (dev_heapSize / 2) * start;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numLabels; j++) {
uint headIndex = startIndex + offset + virtualNumPartialVars * j + i;
uint index = graphGet(headIndex);
if (index != NIL)
computeDegreePerLabel(degree_elements, degree_edges, i, index, startIndex, p);
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
void getDegree(Partition p, int start, uint *degree)
{
uint numPartialVars = p.lastVar - p.firstVar + 1;
uint *host_degree = new uint[numPartialVars * 6]();
uint *dev_degree;
size_t size = numPartialVars * 6 * sizeof(uint);
cudaSafeCall(hipMalloc((void **)&dev_degree, size));
cudaSafeCall(hipMemset(dev_degree, 0, size));
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
if (p.oldSize != 0)
hipLaunchKernelGGL(( computeDegree), dim3(blocks), dim3(threads), 0, 0, dev_degree + numPartialVars * 3, dev_degree, numPartialVars, start, 0);
if (p.deltaSize != 0)
hipLaunchKernelGGL(( computeDegree), dim3(blocks), dim3(threads), 0, 0, dev_degree + numPartialVars * 4, dev_degree + numPartialVars, numPartialVars, start, p.oldSize);
if (p.tmpSize != 0)
hipLaunchKernelGGL(( computeDegree), dim3(blocks), dim3(threads), 0, 0, dev_degree + numPartialVars * 5, dev_degree + numPartialVars * 2, numPartialVars, start, p.oldSize + p.deltaSize);
cudaSafeCall(hipMemcpy(host_degree, dev_degree, size, D2H));
hipFree(dev_degree);
for (int i = 0; i < 6; i++)
memcpy(degree + p.firstVar + numVars * i, host_degree + numPartialVars * i, numPartialVars * sizeof(uint));
delete[] host_degree;
}
__global__ void merge(ComputeRegion old, uint fromOffset, int fromStart)
{
uint numPartialVars = old.lastVar - old.firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numLabels; j++) {
uint fromHeadIndex = fromStartIndex + fromOffset + virtualNumPartialVars * j + i;
uint fromIndex = graphGet(fromHeadIndex);
if (fromIndex != NIL)
union2(i, j + 1, old, fromIndex + fromStartIndex, fromStart);
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
__device__ bool ballot(uint myBits)
{
__shared__ volatile bool temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (isFirstThreadOfVWarp())
temp[threadIdx.y] = false;
if (threadIdx.x < BASE && myBits != 0)
temp[threadIdx.y] = true;
return temp[threadIdx.y];
}
__device__ void removeDuplicates(uint toHeadIndex, uint subHeadIndex, uint myBase, uint myBits, int toStart)
{
uint toStartIndex = (dev_heapSize / 2) * toStart;
uint subIndex = graphGet(subHeadIndex);
if (subIndex == NIL) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
return;
}
subIndex += toStartIndex;
uint subBits = graphGet(subIndex + threadIdx.x);
uint subBase = getValAtThread(subBits, BASE);
uint subNext = getValAtThread(subBits, NEXT);
for (;;) {
if (subBase > myBase) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
return;
} else if (subBase == myBase) {
if (threadIdx.x < BASE)
myBits &= ~subBits;
bool nonEmpty = ballot(myBits);
if (nonEmpty) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
}
return;
} else {
if (subNext == NIL) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
return;
}
subIndex = subNext + toStartIndex;
subBits = graphGet(subIndex + threadIdx.x);
subBase = getValAtThread(subBits, BASE);
subNext = getValAtThread(subBits, NEXT);
}
}
}
__device__ void computeDiff(uint toHeadIndex, uint fromIndex, uint subHeadIndex, int toStart, int fromStart)
{
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
do {
fromIndex += fromStartIndex;
uint myBits = graphGet(fromIndex + threadIdx.x);
uint myBase = getValAtThread(myBits, BASE);
fromIndex = getValAtThread(myBits, NEXT);
if (threadIdx.x == NEXT) myBits = NIL;
removeDuplicates(toHeadIndex, subHeadIndex, myBase, myBits, toStart);
} while (fromIndex != NIL);
}
__global__ void diff(uint numPartialVars, uint toOffset, int toStart, uint fromOffset, int fromStart)
{
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
uint toStartIndex = (dev_heapSize / 2) * toStart;
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numLabels; j++) {
uint fromHeadIndex = fromStartIndex + fromOffset + virtualNumPartialVars * j + i;
uint fromIndex = graphGet(fromHeadIndex);
if (fromIndex == NIL) continue;
uint subHeadIndex = toStartIndex + virtualNumPartialVars * j + i;
uint toHeadIndex = toStartIndex + toOffset + virtualNumPartialVars * j + i;
computeDiff(toHeadIndex, fromIndex, subHeadIndex, toStart, fromStart);
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
void mergeAndDiff(Partition &p, uint heapSize, uint *elementPool)
{
std::cout << "Updating..." << std::flush;
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
uint oldSize = p.oldSize;
uint newSize = p.deltaSize;
uint tmpSize = p.tmpSize;
if (newSize != 0) {
if (oldSize == 0) {
p.oldSize = newSize;
p.deltaSize = 0;
} else {
cudaSafeCall(hipMemcpy(elementPool + heapSize / 2 + oldSize, elementPool + oldSize, newSize * sizeof(uint), D2D));
uint poolSize = oldSize;
cudaSafeCall(hipMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
ComputeRegion old(p.firstVar, p.lastVar, 0, 0, true);
hipLaunchKernelGGL(( merge), dim3(blocks), dim3(threads), 0, 0, old, oldSize, 1);
cudaSafeCall(hipMemcpyFromSymbol(&poolSize, freeList1, sizeof(uint)));
p.oldSize = poolSize;
p.deltaSize = 0;
}
}
if (tmpSize != 0) {
uint fromOffset = oldSize + newSize;
cudaSafeCall(hipMemcpy(elementPool + heapSize / 2 + fromOffset, elementPool + fromOffset, tmpSize * sizeof(uint), D2D));
uint numPartialVars = p.lastVar - p.firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint headSize = virtualNumPartialVars * numLabels;
initialize(headSize, 0, p.oldSize, heapSize, elementPool);
hipLaunchKernelGGL(( diff), dim3(blocks), dim3(threads), 0, 0, numPartialVars, p.oldSize, 0, fromOffset, 1);
uint poolSize;
cudaSafeCall(hipMemcpyFromSymbol(&poolSize, freeList1, sizeof(uint)));
if (poolSize - p.oldSize == headSize) {
poolSize = p.oldSize;
cudaSafeCall(hipMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
} else {
p.deltaSize = poolSize - p.oldSize;
}
p.tmpSize = 0;
}
std::cout << "OK." << std::endl;
}
| bf53f16abf1d47b345cb55245a94b2049eaa255d.cu | #include "kernel.h"
int selectedDevice;
const int N = 21;
int numLabels;
uint numVars;
__device__ __constant__ int dev_numRules;
__device__ __constant__ int dev_rules[N * 3];
__device__ __constant__ int dev_numLabels;
__device__ __constant__ uint dev_heapSize;
__device__ __constant__ uint *dev_elementPool;
// index of the next free element in the corresponding free list
// in words
__device__ uint freeList1 = 0;
__device__ uint freeList2 = 0;
__device__ uint worklistIndex = 0;
__device__ uint counter = 0;
__device__ bool dev_r = false;
__device__ bool dev_changed = false;
void transferRules(int numRules, int *rules)
{
if (numRules > N) {
std::cerr << "N needs to be reset." << std::endl;
exit(-1);
}
cudaSafeCall(cudaMemcpyToSymbol(dev_numRules, &numRules, sizeof(int)));
cudaSafeCall(cudaMemcpyToSymbol(dev_rules, rules, numRules * sizeof(int) * 3));
cudaSafeCall(cudaMemcpyToSymbol(dev_numLabels, &numLabels, sizeof(int)));
}
void allocateElementPool(uint &heapSize, uint* &elementPool)
{
heapSize = (DEVMEMUSED_MB / 8 * 7) * 1024 * 256;
std::cout << "HEAP SIZE: " << heapSize << " (in 32-bit words)" << std::endl;
cudaSafeCall(cudaMalloc((void **)&elementPool, heapSize * sizeof(uint)));
cudaSafeCall(cudaMemcpyToSymbol(dev_heapSize, &heapSize, sizeof(uint)));
cudaSafeCall(cudaMemcpyToSymbol(dev_elementPool, &elementPool, sizeof(uint*)));
}
void transferElements(Partition p, uint *elements, int start, uint heapSize, uint *elementPool)
{
uint poolSize = p.oldSize + p.deltaSize + p.tmpSize;
if (start) {
cudaSafeCall(cudaMemcpyToSymbol(freeList2, &poolSize, sizeof(uint)));
} else {
cudaSafeCall(cudaMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
}
cudaSafeCall(cudaMemcpy(elementPool + (heapSize / 2) * start, elements, poolSize * sizeof(uint), H2D));
delete[] elements;
}
void initialize(uint headSize, int start, uint offset, uint heapSize, uint *elementPool)
{
uint poolSize = offset + headSize;
if (start) {
cudaSafeCall(cudaMemcpyToSymbol(freeList2, &poolSize, sizeof(uint)));
} else {
cudaSafeCall(cudaMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
}
cudaSafeCall(cudaMemset(elementPool + (heapSize / 2) * start + offset, -1, headSize * sizeof(uint)));
}
void needRepartition(Partition &p, uint heapSize, bool &r)
{
bool s = false;
cudaSafeCall(cudaMemcpyFromSymbol(&s, dev_r, sizeof(bool)));
if (s) {
p.tmpSize = heapSize / 2 - p.deltaSize - p.oldSize;
s = false;
cudaSafeCall(cudaMemcpyToSymbol(dev_r, &s, sizeof(bool)));
r = true;
std::cout << "Need Repartition." << std::endl;
}
}
__host__ inline uint getBlocks()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, selectedDevice);
return deviceProp.multiProcessorCount * 4;
}
__device__ inline uint getThreadIdInBlock()
{
return threadIdx.x + threadIdx.y * blockDim.x;
}
__device__ inline uint isFirstThreadOfBlock()
{
return !getThreadIdInBlock();
}
__device__ inline uint isFirstThreadOfVWarp()
{
return !threadIdx.x;
}
__device__ inline void graphSet(const uint pos, const uint val)
{
dev_elementPool[pos] = val;
}
__device__ inline uint graphGet(const uint pos)
{
return dev_elementPool[pos];
}
__device__ inline uint getAndIncrement(const uint delta)
{
__shared__ volatile uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (isFirstThreadOfVWarp())
temp[threadIdx.y] = atomicAdd(&worklistIndex, delta);
return temp[threadIdx.y];
}
__device__ inline void resetWorklistIndex()
{
__syncthreads();
if (isFirstThreadOfBlock() && atomicInc(&counter, gridDim.x - 1) == (gridDim.x - 1))
worklistIndex = 0;
}
__device__ uint getValAtThread(const uint myVal, const uint i)
{
__shared__ volatile uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (threadIdx.x == i)
temp[threadIdx.y] = myVal;
return temp[threadIdx.y];
}
__device__ inline uint mallocIn(int start, uint size = ELEMENT_WIDTH)
{
__shared__ volatile uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (isFirstThreadOfVWarp()) {
if (start) {
temp[threadIdx.y] = atomicAdd(&freeList2, size);
} else {
temp[threadIdx.y] = atomicAdd(&freeList1, size);
}
}
if (temp[threadIdx.y] + size > dev_heapSize / 2) {
dev_r = true;
return -1;
} else {
return temp[threadIdx.y];
}
}
__device__ inline uint getIndex(uint headIndex, int start)
{
uint index = graphGet(headIndex);
if (index == NIL) {
uint newIndex = mallocIn(start);
if (newIndex != -1) {
graphSet((dev_heapSize / 2) * start + newIndex + threadIdx.x, NIL);
graphSet(headIndex, newIndex);
}
return newIndex;
}
return index;
}
__device__ uint addElement(uint index, uint fromBase, uint fromBits, int start)
{
uint startIndex = (dev_heapSize / 2) * start;
for (;;) {
uint toBits = graphGet(index + threadIdx.x);
uint toBase = getValAtThread(toBits, BASE);
if (toBase == NIL) {
// can only happen if the list is empty
graphSet(index + threadIdx.x, fromBits);
return index;
}
if (toBase == fromBase) {
uint orBits = toBits | fromBits;
if (orBits != toBits && threadIdx.x < NEXT)
graphSet(index + threadIdx.x, orBits);
return index;
}
if (toBase < fromBase) {
uint toNext = getValAtThread(toBits, NEXT);
if (toNext == NIL) {
// appending
uint newIndex = mallocIn(start);
if (newIndex == -1) return -1;
graphSet(newIndex + startIndex + threadIdx.x, fromBits);
graphSet(index + NEXT, newIndex);
return newIndex + startIndex;
}
index = toNext + startIndex;
} else {
uint newIndex = mallocIn(start);
if (newIndex == -1) return -1;
graphSet(newIndex + startIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
graphSet(index + threadIdx.x, val);
return index;
}
}
}
__device__ uint insert(uint index, uint var, int start)
{
uint base = BASE_OF(var);
uint unit = UNIT_OF(var);
uint bit = BIT_OF(var);
uint myBits = 0;
if (threadIdx.x == unit) myBits = 1 << bit;
if (threadIdx.x == BASE) myBits = base;
if (threadIdx.x == NEXT) myBits = NIL;
return addElement(index, base, myBits, start);
}
__device__ uint clone(uint nextIndex, int toStart, uint fromBits, uint fromNext, uint fromStartIndex)
{
uint toStartIndex = (dev_heapSize / 2) * toStart;
for (;;) {
uint newIndex = mallocIn(toStart);
if (newIndex == -1) return -1;
dev_changed = true;
uint val = threadIdx.x == NEXT ? NIL : fromBits;
graphSet(newIndex + toStartIndex + threadIdx.x, val);
graphSet(nextIndex, newIndex);
if (fromNext == NIL) break;
fromBits = graphGet(fromNext + fromStartIndex + threadIdx.x);
fromNext = getValAtThread(fromBits, NEXT);
nextIndex = newIndex + toStartIndex + NEXT;
}
return 0;
}
__device__ uint union2(uint to, uint toRel, ComputeRegion tmp, uint fromIndex, int fromStart)
{
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
uint toStartIndex = (dev_heapSize / 2) * tmp.start;
uint fromBits = graphGet(fromIndex + threadIdx.x);
uint fromBase = getValAtThread(fromBits, BASE);
uint fromNext = getValAtThread(fromBits, NEXT);
uint toHeadIndex = toStartIndex + tmp.offset + roundToNextMultipleOf(tmp.lastVar - tmp.firstVar + 1) * (toRel - 1) + to;
uint toIndex = graphGet(toHeadIndex);
if (toIndex == NIL) {
uint s = clone(toHeadIndex, tmp.start, fromBits, fromNext, fromStartIndex);
if (s == -1) return -1;
return 0;
}
toIndex += toStartIndex;
uint toBits = graphGet(toIndex + threadIdx.x);
uint toBase = getValAtThread(toBits, BASE);
uint toNext = getValAtThread(toBits, NEXT);
for (;;) {
if (toBase > fromBase) {
uint newIndex = mallocIn(tmp.start);
if (newIndex == -1) return -1;
dev_changed = true;
graphSet(newIndex + toStartIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
graphSet(toIndex + threadIdx.x, val);
if (fromNext == NIL) return 0;
toIndex = newIndex + toStartIndex;
fromBits = graphGet(fromNext + fromStartIndex + threadIdx.x);
fromBase = getValAtThread(fromBits, BASE);
fromNext = getValAtThread(fromBits, NEXT);
} else if (toBase == fromBase) {
uint orBits = fromBits | toBits;
uint newBits = threadIdx.x == NEXT ? toNext : orBits;
if (newBits != toBits) dev_changed = true;
graphSet(toIndex + threadIdx.x, newBits);
if (fromNext == NIL) return 0;
fromBits = graphGet(fromNext + fromStartIndex + threadIdx.x);
fromBase = getValAtThread(fromBits, BASE);
fromNext = getValAtThread(fromBits, NEXT);
if (toNext == NIL) {
uint s = clone(toIndex + NEXT, tmp.start, fromBits, fromNext, fromStartIndex);
if (s == -1) return -1;
return 0;
}
toIndex = toNext + toStartIndex;
toBits = graphGet(toIndex + threadIdx.x);
toBase = getValAtThread(toBits, BASE);
toNext = getValAtThread(toBits, NEXT);
} else {
if (toNext == NIL) {
uint s = clone(toIndex + NEXT, tmp.start, fromBits, fromNext, fromStartIndex);
if (s == -1) return -1;
return 0;
}
toIndex = toNext + toStartIndex;
toBits = graphGet(toIndex + threadIdx.x);
toBase = getValAtThread(toBits, BASE);
toNext = getValAtThread(toBits, NEXT);
}
}
}
__device__ uint unionAll(uint toRel, uint fromRel, uint to, uint numFroms, uint *p, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
uint startIndex_dst1 = (dev_heapSize / 2) * dst1.start;
uint virtualNumPartialVars_dst1 = roundToNextMultipleOf(dst1.lastVar - dst1.firstVar + 1);
uint startIndex_dst2 = (dev_heapSize / 2) * dst2.start;
uint virtualNumPartialVars_dst2 = roundToNextMultipleOf(dst2.lastVar - dst2.firstVar + 1);
for (uint i = 0; i < numFroms; i++) {
if (p[i] >= dst1.firstVar && p[i] <= dst1.lastVar) {
uint headIndex1 = startIndex_dst1 + dst1.offset + virtualNumPartialVars_dst1 * (fromRel - 1) + p[i] - dst1.firstVar;
uint fromIndex1 = graphGet(headIndex1);
if (fromIndex1 != NIL) {
uint s = union2(to, toRel, tmp, fromIndex1 + startIndex_dst1, dst1.start);
if (s == -1) return -1;
}
}
if (dst2.flag) {
if (p[i] >= dst2.firstVar && p[i] <= dst2.lastVar) {
uint headIndex2 = startIndex_dst2 + dst2.offset + virtualNumPartialVars_dst2 * (fromRel - 1) + p[i] - dst2.firstVar;
uint fromIndex2 = graphGet(headIndex2);
if (fromIndex2 != NIL) {
uint s = union2(to, toRel, tmp, fromIndex2 + startIndex_dst2, dst2.start);
if (s == -1) return -1;
}
}
}
}
return 0;
}
__device__ uint decode(uint toRel, uint fromRel, uint myBits, uint base, uint i, uint *p, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
for (int j = 0; j < BASE; j++) {
uint bits = getValAtThread(myBits, j);
if (bits) {
uint numOnes = __popc(bits);
for (int k = 0; k < 32 / blockDim.x; k++) {
uint threadId = threadIdx.x + blockDim.x * k;
uint threadMask = 1 << threadId;
uint myMask = threadMask - 1;
uint var = base * ELEMENT_CARDINALITY + mul32(j) + threadId;
uint bitActive = bits & threadMask;
uint pos = __popc(bits & myMask);
if (bitActive) p[pos] = var;
}
uint s = unionAll(toRel, fromRel, i, numOnes, p, dst1, dst2, tmp);
if (s == -1) return -1;
}
}
return 0;
}
__device__ uint apply(uint firstRel, uint secondRel, uint thirdRel, uint i, uint *p, ComputeRegion src, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
uint startIndex = (dev_heapSize / 2) * src.start;
uint headIndex = startIndex + src.offset + roundToNextMultipleOf(src.lastVar - src.firstVar + 1) * (firstRel - 1) + i;
uint index = graphGet(headIndex);
while (index != NIL) {
index += startIndex;
uint myBits = graphGet(index + threadIdx.x);
uint base = getValAtThread(myBits, BASE);
uint s = decode(thirdRel, secondRel, myBits, base, i, p, dst1, dst2, tmp);
if (s == -1) return -1;
index = getValAtThread(myBits, NEXT);
}
return 0;
}
/*
__global__ void addEdges(uint* keys, uint* valIndex, const uint numKeys, uint* val1, uint* val2, uint firstVar, uint lastVar, int start, uint offset) {
__shared__ uint temp[THREADS_PER_BLOCK / WARP_SIZE * 64];
uint* p = &temp[threadIdx.y * 64];
uint startIndex = (dev_heapSize / 2) * start;
uint virtualNumVars = roundToNextMultipleOf(lastVar - firstVar + 1);
uint i = getAndIncrement(1);
while (i < numKeys) {
uint src = keys[i];
uint begin = valIndex[i];
uint end = valIndex[i + 1];
uint virtualBegin = roundToPrevMultipleOf(begin); // to ensure alignment
for (int j = virtualBegin; j < end; j += WARP_SIZE) {
uint myIndex = j + threadIdx.x;
p[threadIdx.x] = myIndex < end ? val1[myIndex] : NIL;
p[threadIdx.x + 32] = myIndex < end ? val2[myIndex] : NIL;
uint beginK = max((int)begin - j, 0);
uint endK = min(end - j, WARP_SIZE);
for (int k = beginK; k < endK; k++) {
uint dst = p[k];
uint rel = p[k + 32];
uint headIndex = startIndex + offset + virtualNumVars * (rel - 1) + src - firstVar;
uint index = getIndex(headIndex, start);
if (index == -1) {
break;
}
uint s = insert(index + startIndex, dst, start);
if (s == -1) {
break;
}
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
*/
// for complete rules
__global__ void compute11(ComputeRegion src, ComputeRegion dst1, ComputeRegion dst2, ComputeRegion tmp)
{
__shared__ uint temp[THREADS_PER_BLOCK / ELEMENT_WIDTH * 32];
uint *p = &temp[threadIdx.y * 32];
uint numPartialVars = src.lastVar - src.firstVar + 1;
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numRules; j++) {
if (dev_rules[j * 3 + 1] != 0 && dev_rules[j * 3 + 2] != 0) {
uint s = apply(dev_rules[j * 3 + 1], dev_rules[j * 3 + 2], dev_rules[j * 3], i, p, src, dst1, dst2, tmp);
if (s == -1) break;
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
// for rules which have two labels
__global__ void compute10(uint firstVar, uint lastVar, uint fromOffset, int start, ComputeRegion tmp)
{
uint startIndex = (dev_heapSize / 2) * start;
uint numPartialVars = lastVar - firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numRules; j++) {
if (dev_rules[j * 3 + 1] != 0 && dev_rules[j * 3 + 2] == 0) {
uint fromHeadIndex = startIndex + fromOffset + virtualNumPartialVars * (dev_rules[j * 3 + 1] - 1) + i;
uint fromIndex = graphGet(fromHeadIndex);
if (fromIndex != NIL) {
uint s = union2(i, dev_rules[j * 3], tmp, fromIndex + startIndex, start);
if (s == -1) break;
}
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
// for rules which have only one label
__global__ void compute00(uint firstVar, uint lastVar, uint tmpOffset, int start)
{
uint startIndex = (dev_heapSize / 2) * start;
uint numPartialVars = lastVar - firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numRules; j++) {
if (dev_rules[j * 3 + 1] == 0) {
uint headIndex = startIndex + tmpOffset + virtualNumPartialVars * (dev_rules[j * 3] - 1) + i;
uint index = getIndex(headIndex, start);
if (index == -1) break;
uint s = insert(index + startIndex, firstVar + i, start);
if (s == -1) break;
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
void spagpu_s(Partition &p, int start, uint heapSize, uint *elementPool, bool &r, uint filter)
{
std::cout << "Self-matching..." << std::flush;
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
if (p.tmpSize == 0) {
uint numPartialVars = p.lastVar - p.firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint headSize = virtualNumPartialVars * numLabels;
uint offset = p.oldSize + p.deltaSize;
if (offset + headSize > heapSize / 2) { // if the size of the partition exceeds the limit, return and repart
r = true;
std::cout << "Need Repartition." << std::endl;
return;
}
initialize(headSize, start, offset, heapSize, elementPool);
}
ComputeRegion empty(0, 0, 0, 0, false);
ComputeRegion tmp_s(p.firstVar, p.lastVar, start, p.oldSize + p.deltaSize, true);
if (p.oldSize == 0) {
compute00<<<blocks, threads>>>(p.firstVar, p.lastVar, p.oldSize + p.deltaSize, start);
needRepartition(p, heapSize, r);
if (r) return;
}
if(filter == 0){
compute10<<<blocks, threads>>>(p.firstVar, p.lastVar, p.oldSize, start, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
}
ComputeRegion new_s(p.firstVar, p.lastVar, start, p.oldSize, true);
if (p.oldSize != 0) {
ComputeRegion old_s(p.firstVar, p.lastVar, start, 0, true);
if(filter == 0){
compute11<<<blocks, threads>>>(old_s, new_s, empty, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
}
compute11<<<blocks, threads>>>(new_s, old_s, new_s, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
} else {
compute11<<<blocks, threads>>>(new_s, new_s, empty, tmp_s);
needRepartition(p, heapSize, r);
if (r) return;
}
uint poolSize;
if (start) {
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize, freeList2, sizeof(uint)));
} else {
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize, freeList1, sizeof(uint)));
}
p.tmpSize = poolSize - p.deltaSize - p.oldSize;
std::cout << "OK." << std::endl;
}
void spagpu_b(Partition &p1, Partition &p2, bool &r1, bool &r2, uint heapSize, uint *elementPool, uint filter)
{
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
if (p1.tmpSize == 0) {
uint numPartialVars1 = p1.lastVar - p1.firstVar + 1;
uint virtualNumPartialVars1 = roundToNextMultipleOf(numPartialVars1);
uint headSize1 = virtualNumPartialVars1 * numLabels;
uint offset1 = p1.oldSize + p1.deltaSize;
if (offset1 + headSize1 > heapSize / 2) {
r1 = true;
std::cout << "Need Repartition." << std::endl;
return;
}
initialize(headSize1, 0, offset1, heapSize, elementPool);
}
if (p2.tmpSize == 0) {
uint numPartialVars2 = p2.lastVar - p2.firstVar + 1;
uint virtualNumPartialVars2 = roundToNextMultipleOf(numPartialVars2);
uint headSize2 = virtualNumPartialVars2 * numLabels;
uint offset2 = p2.oldSize + p2.deltaSize;
if (offset2 + headSize2 > heapSize / 2) {
r2 = true;
std::cout << "Need Repartition." << std::endl;
return;
}
initialize(headSize2, 1, offset2, heapSize, elementPool);
}
ComputeRegion empty(0, 0, 0, 0, false);
ComputeRegion tmp1(p1.firstVar, p1.lastVar, 0, p1.oldSize + p1.deltaSize, true);
ComputeRegion tmp2(p2.firstVar, p2.lastVar, 1, p2.oldSize + p2.deltaSize, true);
std::cout << "## ITERATION 0 ##" << std::endl;
if (p1.oldSize != 0 && p2.deltaSize != 0) {
if(filter == 0){
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(old1, new2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
if (p2.oldSize != 0 && p2.deltaSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(new1, old2, new2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
else {
if (p2.oldSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
compute11<<<blocks, threads>>>(new1, old2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(new1, new2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
}
uint poolSize1;
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize1, freeList1, sizeof(uint)));
p1.tmpSize = poolSize1 - p1.deltaSize - p1.oldSize;
if (p2.oldSize != 0 && p1.deltaSize != 0) {
if(filter == 0){
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(old2, new1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
if (p1.oldSize != 0 && p1.deltaSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(new2, old1, new1, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
else {
if (p1.oldSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
compute11<<<blocks, threads>>>(new2, old1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(new2, new1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
}
uint poolSize2;
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize2, freeList2, sizeof(uint)));
p2.tmpSize = poolSize2 - p2.deltaSize - p2.oldSize;
}
void spagpu(Partition &p1, Partition &p2, bool &r1, bool &r2, uint heapSize, uint filter)
{
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
ComputeRegion empty(0, 0, 0, 0, false);
ComputeRegion tmp1(p1.firstVar, p1.lastVar, 0, p1.oldSize + p1.deltaSize, true);
ComputeRegion tmp2(p2.firstVar, p2.lastVar, 1, p2.oldSize + p2.deltaSize, true);
// repeat until a fixed point is reached
int iterNo = 0;
for (;;) {
std::cout << "## ITERATION " << ++iterNo << " ##" << std::endl;
bool changed = false;
cudaSafeCall(cudaMemcpyToSymbol(dev_changed, &changed, sizeof(bool)));
if (p1.oldSize != 0) {
if(filter == 0){
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
compute11<<<blocks, threads>>>(old1, tmp1, tmp2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(new1, tmp1, tmp2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p1.oldSize != 0 && p1.deltaSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(tmp1, old1, new1, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
} else {
if (p1.oldSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
compute11<<<blocks, threads>>>(tmp1, old1, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(tmp1, new1, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if (p2.oldSize != 0 && p2.deltaSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(tmp1, old2, new2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
} else {
if (p2.oldSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
compute11<<<blocks, threads>>>(tmp1, old2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(tmp1, new2, empty, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
}
if(filter == 0){
compute10<<<blocks, threads>>>(p1.firstVar, p1.lastVar, p1.oldSize + p1.deltaSize, 0, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
}
compute11<<<blocks, threads>>>(tmp1, tmp1, tmp2, tmp1);
needRepartition(p1, heapSize, r1);
if (r1) return;
uint poolSize1;
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize1, freeList1, sizeof(uint)));
p1.tmpSize = poolSize1 - p1.deltaSize - p1.oldSize;
if (p2.oldSize != 0) {
if(filter == 0){
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
compute11<<<blocks, threads>>>(old2, tmp1, tmp2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(new2, tmp1, tmp2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p2.oldSize != 0 && p2.deltaSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(tmp2, old2, new2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
} else {
if (p2.oldSize != 0) {
ComputeRegion old2(p2.firstVar, p2.lastVar, 1, 0, true);
compute11<<<blocks, threads>>>(tmp2, old2, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p2.deltaSize != 0) {
ComputeRegion new2(p2.firstVar, p2.lastVar, 1, p2.oldSize, true);
compute11<<<blocks, threads>>>(tmp2, new2, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if (p1.oldSize != 0 && p1.deltaSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(tmp2, old1, new1, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
} else {
if (p1.oldSize != 0) {
ComputeRegion old1(p1.firstVar, p1.lastVar, 0, 0, true);
compute11<<<blocks, threads>>>(tmp2, old1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
if (p1.deltaSize != 0) {
ComputeRegion new1(p1.firstVar, p1.lastVar, 0, p1.oldSize, true);
compute11<<<blocks, threads>>>(tmp2, new1, empty, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
}
if(filter == 0){
compute10<<<blocks, threads>>>(p2.firstVar, p2.lastVar, p2.oldSize + p2.deltaSize, 1, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
}
compute11<<<blocks, threads>>>(tmp2, tmp1, tmp2, tmp2);
needRepartition(p2, heapSize, r2);
if (r2) return;
uint poolSize2;
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize2, freeList2, sizeof(uint)));
p2.tmpSize = poolSize2 - p2.deltaSize - p2.oldSize;
cudaSafeCall(cudaMemcpyFromSymbol(&changed, dev_changed, sizeof(bool)));
if (changed == false) break;
}
}
__device__ void computeDegreePerLabel(uint *degree_elements, uint *degree_edges, uint i, uint index, uint startIndex, uint *p)
{
do {
if (isFirstThreadOfVWarp()) degree_elements[i]++;
index += startIndex;
uint myBits = graphGet(index + threadIdx.x);
p[threadIdx.x] = threadIdx.x < BASE ? __popc(myBits) : 0;
int k = blockDim.x / 2;
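        // tree-reduce the per-thread popcounts in shared memory; p[0] ends up holding this element's edge count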
while (k) {
if (threadIdx.x < k) p[threadIdx.x] += p[threadIdx.x + k];
k /= 2;
}
if (isFirstThreadOfVWarp()) degree_edges[i] += p[0];
index = getValAtThread(myBits, NEXT);
} while (index != NIL);
}
__global__ void computeDegree(uint *degree_elements, uint *degree_edges, uint numPartialVars, int start, uint offset)
{
__shared__ uint temp[THREADS_PER_BLOCK];
uint *p = &temp[threadIdx.y * blockDim.x];
uint startIndex = (dev_heapSize / 2) * start;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numLabels; j++) {
uint headIndex = startIndex + offset + virtualNumPartialVars * j + i;
uint index = graphGet(headIndex);
if (index != NIL)
computeDegreePerLabel(degree_elements, degree_edges, i, index, startIndex, p);
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
void getDegree(Partition p, int start, uint *degree)
{
uint numPartialVars = p.lastVar - p.firstVar + 1;
uint *host_degree = new uint[numPartialVars * 6]();
uint *dev_degree;
size_t size = numPartialVars * 6 * sizeof(uint);
cudaSafeCall(cudaMalloc((void **)&dev_degree, size));
cudaSafeCall(cudaMemset(dev_degree, 0, size));
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
if (p.oldSize != 0)
computeDegree<<<blocks, threads>>>(dev_degree + numPartialVars * 3, dev_degree, numPartialVars, start, 0);
if (p.deltaSize != 0)
computeDegree<<<blocks, threads>>>(dev_degree + numPartialVars * 4, dev_degree + numPartialVars, numPartialVars, start, p.oldSize);
if (p.tmpSize != 0)
computeDegree<<<blocks, threads>>>(dev_degree + numPartialVars * 5, dev_degree + numPartialVars * 2, numPartialVars, start, p.oldSize + p.deltaSize);
cudaSafeCall(cudaMemcpy(host_degree, dev_degree, size, D2H));
cudaFree(dev_degree);
for (int i = 0; i < 6; i++)
memcpy(degree + p.firstVar + numVars * i, host_degree + numPartialVars * i, numPartialVars * sizeof(uint));
delete[] host_degree;
}
__global__ void merge(ComputeRegion old, uint fromOffset, int fromStart)
{
uint numPartialVars = old.lastVar - old.firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numLabels; j++) {
uint fromHeadIndex = fromStartIndex + fromOffset + virtualNumPartialVars * j + i;
uint fromIndex = graphGet(fromHeadIndex);
if (fromIndex != NIL)
union2(i, j + 1, old, fromIndex + fromStartIndex, fromStart);
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
__device__ bool ballot(uint myBits)
{
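    // shared-memory emulation of an "any" vote: returns true if any lane of the virtual warp still has bits set in myBits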
__shared__ volatile bool temp[THREADS_PER_BLOCK / ELEMENT_WIDTH];
if (isFirstThreadOfVWarp())
temp[threadIdx.y] = false;
if (threadIdx.x < BASE && myBits != 0)
temp[threadIdx.y] = true;
return temp[threadIdx.y];
}
__device__ void removeDuplicates(uint toHeadIndex, uint subHeadIndex, uint myBase, uint myBits, int toStart)
{
uint toStartIndex = (dev_heapSize / 2) * toStart;
uint subIndex = graphGet(subHeadIndex);
if (subIndex == NIL) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
return;
}
subIndex += toStartIndex;
uint subBits = graphGet(subIndex + threadIdx.x);
uint subBase = getValAtThread(subBits, BASE);
uint subNext = getValAtThread(subBits, NEXT);
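    // walk the sorted subtrahend list and keep only the bits of (myBase, myBits) that it does not already contain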
for (;;) {
if (subBase > myBase) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
return;
} else if (subBase == myBase) {
if (threadIdx.x < BASE)
myBits &= ~subBits;
bool nonEmpty = ballot(myBits);
if (nonEmpty) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
}
return;
} else {
if (subNext == NIL) {
uint toIndex = getIndex(toHeadIndex, toStart);
addElement(toIndex + toStartIndex, myBase, myBits, toStart);
return;
}
subIndex = subNext + toStartIndex;
subBits = graphGet(subIndex + threadIdx.x);
subBase = getValAtThread(subBits, BASE);
subNext = getValAtThread(subBits, NEXT);
}
}
}
__device__ void computeDiff(uint toHeadIndex, uint fromIndex, uint subHeadIndex, int toStart, int fromStart)
{
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
do {
fromIndex += fromStartIndex;
uint myBits = graphGet(fromIndex + threadIdx.x);
uint myBase = getValAtThread(myBits, BASE);
fromIndex = getValAtThread(myBits, NEXT);
if (threadIdx.x == NEXT) myBits = NIL;
removeDuplicates(toHeadIndex, subHeadIndex, myBase, myBits, toStart);
} while (fromIndex != NIL);
}
__global__ void diff(uint numPartialVars, uint toOffset, int toStart, uint fromOffset, int fromStart)
{
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint fromStartIndex = (dev_heapSize / 2) * fromStart;
uint toStartIndex = (dev_heapSize / 2) * toStart;
uint i = getAndIncrement(1);
while (i < numPartialVars) {
for (int j = 0; j < dev_numLabels; j++) {
uint fromHeadIndex = fromStartIndex + fromOffset + virtualNumPartialVars * j + i;
uint fromIndex = graphGet(fromHeadIndex);
if (fromIndex == NIL) continue;
uint subHeadIndex = toStartIndex + virtualNumPartialVars * j + i;
uint toHeadIndex = toStartIndex + toOffset + virtualNumPartialVars * j + i;
computeDiff(toHeadIndex, fromIndex, subHeadIndex, toStart, fromStart);
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
void mergeAndDiff(Partition &p, uint heapSize, uint *elementPool)
{
std::cout << "Updating..." << std::flush;
uint blocks = getBlocks();
dim3 threads(ELEMENT_WIDTH, THREADS_PER_BLOCK / ELEMENT_WIDTH);
uint oldSize = p.oldSize;
uint newSize = p.deltaSize;
uint tmpSize = p.tmpSize;
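    // fold the delta region into old, then diff the tmp region against old so that only genuinely new elements become the next delta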
if (newSize != 0) {
if (oldSize == 0) {
p.oldSize = newSize;
p.deltaSize = 0;
} else {
cudaSafeCall(cudaMemcpy(elementPool + heapSize / 2 + oldSize, elementPool + oldSize, newSize * sizeof(uint), D2D));
uint poolSize = oldSize;
cudaSafeCall(cudaMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
ComputeRegion old(p.firstVar, p.lastVar, 0, 0, true);
merge<<<blocks, threads>>>(old, oldSize, 1);
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize, freeList1, sizeof(uint)));
p.oldSize = poolSize;
p.deltaSize = 0;
}
}
if (tmpSize != 0) {
uint fromOffset = oldSize + newSize;
cudaSafeCall(cudaMemcpy(elementPool + heapSize / 2 + fromOffset, elementPool + fromOffset, tmpSize * sizeof(uint), D2D));
uint numPartialVars = p.lastVar - p.firstVar + 1;
uint virtualNumPartialVars = roundToNextMultipleOf(numPartialVars);
uint headSize = virtualNumPartialVars * numLabels;
initialize(headSize, 0, p.oldSize, heapSize, elementPool);
diff<<<blocks, threads>>>(numPartialVars, p.oldSize, 0, fromOffset, 1);
uint poolSize;
cudaSafeCall(cudaMemcpyFromSymbol(&poolSize, freeList1, sizeof(uint)));
if (poolSize - p.oldSize == headSize) {
poolSize = p.oldSize;
cudaSafeCall(cudaMemcpyToSymbol(freeList1, &poolSize, sizeof(uint)));
} else {
p.deltaSize = poolSize - p.oldSize;
}
p.tmpSize = 0;
}
std::cout << "OK." << std::endl;
}
|
d4dbdbf62d438d9462e07d43d281573afe4f21b0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <fstream>
#include <hip/hip_runtime_api.h>
#include "helper.h"
#include <float.h>
/////////////////////////////////////////////////////////////////////////
// Init
/////////////////////////////////////////////////////////////////////////
uint64_t noOfTimeSeries = 20;
uint64_t lenOfTimeSeries = 70;
uint64_t noOfTestTimeSeries = 10;
/////////////////////////////////////////////////////////////////////////
void usage(){
printf("********************************\n");
printf("************* USAGE ************\n");
printf("********************************\n");
printf("./classification-ed [training-file] [number-of-time-series] [length-of-time-series] [testing-file] [number-of-times-series-in-test]\n");
printf("eg. ./classification-ed SonyAIBORobotSurface_TRAIN 20 70 \n");
printf("********************************\n");
}
void readfile(char* inputFileName,float* _data,int* _class,uint64_t len)
{
std::ifstream in_file;
in_file.open(inputFileName);
if(!in_file) {
printf("\nFile Not Found !");
exit(1);
}
float class_in;
float data_in;
long int i, j;
for(i=0; i<len; i++)
{
in_file >> class_in;
_class[i] = (int)class_in;
//printf("class : %d\n",_class[i]);
for (j=0; j<lenOfTimeSeries; j++)
{
in_file >> data_in;
_data[i*lenOfTimeSeries+j] = data_in;
//printf("%f, ",_data[i*lenOfTimeSeries+j]);
}
//printf("\n");
}
in_file.close();
}
////////////////////////////////////////////////////////////////
__device__ void normalize(float* d_data, float mean, float stdev, uint64_t t, float* norm_data, const int L)
{
int i = 0;
for(i=0; i<L; i++)
{
norm_data[i] = (d_data[t+i]-mean)/stdev;
}
}
////////////////////////////////////////////////////////////////
__global__ void Euclidean_Distance(float* trainingData, float* testData, float* output, int length)
{
int i;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float temp[1024];
float sum = 0, sum_sqr = 0, mean = 0, mean_sqr = 0, variance = 0, std_dev = 0;
int t = length*(idx/length) + idx%length;
for(i=t; i<t+length; i++)
{
sum += trainingData[i];
sum_sqr += trainingData[i] * trainingData[i];
}
mean = sum / length;
mean_sqr = mean*mean;
variance = (sum_sqr/length) - mean_sqr;
std_dev = sqrt(variance);
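    // z-normalize this training subsequence, then accumulate its squared distance to the test series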
i = 0;
for(; i<length; i++)
temp[i] = (trainingData[t+i]-mean) / std_dev;
float errorSummation = 0;
for(i=0; i < length; i++)
{
errorSummation += (temp[i] - testData[i])*(temp[i] - testData[i]);
}
errorSummation = sqrt(errorSummation);
output[idx] = errorSummation;
}
int main(int argc, char * argv[])
{
clock_t start, end;
fprintf(stderr, "Initializing ... \n");
char* inputFileName = argv[1];
int isDefault = 0;
if(!inputFileName){
printf("No test file provided. Using default file : SonyAIBORobotSurface_TRAIN\n");
inputFileName = "SonyAIBORobotSurface_TRAIN";
isDefault = 1;
}
    if(argc > 2){ // argv[2] (number of time series) must exist before it is read
noOfTimeSeries = atoi(argv[2]);
}else{
if(isDefault == 0){
printf("Number of time series not provided. Exiting\n");
exit(0);
}
}
    if(argc > 3){ // argv[3] (length of time series) must exist before it is read
lenOfTimeSeries = atoi(argv[3]);
}
else{
if(isDefault == 0){
printf("Length of time series not provided. Exiting\n");
exit(0);
}
}
uint64_t train_size = noOfTimeSeries * lenOfTimeSeries * sizeof(float);
uint64_t test_size;// = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float);
//storage allocation for train data and train class labels
float* train_data = (float*) malloc(train_size);
int* train_class = (int *) malloc(noOfTimeSeries*sizeof(int));
//storage allocation for test data and test class labels
float* test_data;// = (float*) malloc (test_size);
int* test_class;// = (int *) malloc(noOfTestTimeSeries * sizeof(int));
//get training file
printf("Reading train file\n");
//read training file
readfile(inputFileName, train_data, train_class, noOfTimeSeries);
printf("===================================================\n");
printf("Training File : %s\n",inputFileName);
printf("Number of Time Series : %d\n",noOfTimeSeries);
printf("Length of Time Series : %d\n",lenOfTimeSeries);
// If Testing File is provided
if(argc == 6 || isDefault == 1){
char* testFileName;
if(isDefault == 0){
testFileName = argv[4];
noOfTestTimeSeries = atoi(argv[5]);
}else{
testFileName = "SonyAIBORobotSurface_TEST";
noOfTestTimeSeries = 601;
}
printf("-----------------------------------------------------\n");
//get testing file
printf("Reading test file\n");
test_size = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float);
test_data = (float*) malloc (test_size);
test_class = (int *) malloc(noOfTestTimeSeries * sizeof(int));
//read test file
readfile(testFileName, test_data, test_class, noOfTestTimeSeries);
printf("Testing File : %s\n",testFileName);
printf("Number of Time Series to validate: %d\n",noOfTestTimeSeries);
}
printf("===================================================\n");
hipProfilerStart();
//GPU number present in the system
int noOfGPUs;
checkCudaErrors(hipGetDeviceCount(&noOfGPUs));
printf("Total GPUs on System : %d\n", noOfGPUs);
int threadsPerBlock = min((int)ceil(lenOfTimeSeries/(float)32)*32,1024);
int noOfBlocks = ceil((noOfTimeSeries*lenOfTimeSeries)/(float)threadsPerBlock);
printf("noOfBlocks %d threadsPerBlock %d\n",noOfBlocks ,threadsPerBlock);
//allocate memory for subsequences
float* subseq = (float*)malloc(train_size);
//allocate memory on device and copy required data to GPU memory
float* d_subseq = 0;
hipMalloc((void**)&d_subseq, train_size);
float* d_test_series = 0;
hipMalloc((void**)&d_test_series,lenOfTimeSeries*sizeof(float));
float* d_train_data = 0;
hipMalloc((void**)&d_train_data, train_size);
hipMemcpy(d_train_data, train_data, train_size, hipMemcpyHostToDevice);
start = clock();
int i = 0;
int errorCount = 0 , minIndex = -1;
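    // 1-NN classification: for each test series, compute distances to all training series on the GPU and pick the nearest neighbour's class on the host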
for (i=0;i < noOfTestTimeSeries;i++)
{
hipMemcpy(d_test_series, test_data+(lenOfTimeSeries*i), lenOfTimeSeries*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Euclidean_Distance), dim3(noOfBlocks), dim3(threadsPerBlock), 0, 0, d_train_data, d_test_series, d_subseq, lenOfTimeSeries);
hipDeviceSynchronize();
hipMemcpy(subseq, d_subseq, train_size, hipMemcpyDeviceToHost);
float minDistance = FLT_MAX;
minIndex = -1;
int j = 0;
for(; j < noOfTimeSeries ; j++ )
{
if ( minDistance > subseq[j*lenOfTimeSeries] )
{
minDistance = subseq[j*lenOfTimeSeries];
minIndex = j;
}
}
if( train_class[minIndex] != test_class[i] )
errorCount++;
//printf("%d\t%d\t %d\t%d\t%3.6f\n",i , test_class[i] ,train_class[minIndex], minIndex , minDistance );
}
hipFree(d_train_data);
hipFree(d_subseq);
hipFree(d_test_series);
free(train_class);
free(test_class);
end = clock() - start;
double endtime = (double)end / ((double)CLOCKS_PER_SEC);
printf("Total Time GPU : %f\n", endtime);
printf("Accuracy is %f\n",(float)(noOfTestTimeSeries-errorCount)*(100.0/noOfTestTimeSeries));
hipProfilerStop();
return 0;
}
| d4dbdbf62d438d9462e07d43d281573afe4f21b0.cu | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <fstream>
#include <cuda_profiler_api.h>
#include "helper.h"
#include <float.h>
/////////////////////////////////////////////////////////////////////////
// Init
/////////////////////////////////////////////////////////////////////////
uint64_t noOfTimeSeries = 20;
uint64_t lenOfTimeSeries = 70;
uint64_t noOfTestTimeSeries = 10;
/////////////////////////////////////////////////////////////////////////
void usage(){
printf("********************************\n");
printf("************* USAGE ************\n");
printf("********************************\n");
printf("./classification-ed [training-file] [number-of-time-series] [length-of-time-series] [testing-file] [number-of-times-series-in-test]\n");
printf("eg. ./classification-ed SonyAIBORobotSurface_TRAIN 20 70 \n");
printf("********************************\n");
}
void readfile(char* inputFileName,float* _data,int* _class,uint64_t len)
{
std::ifstream in_file;
in_file.open(inputFileName);
if(!in_file) {
printf("\nFile Not Found !");
exit(1);
}
float class_in;
float data_in;
long int i, j;
for(i=0; i<len; i++)
{
in_file >> class_in;
_class[i] = (int)class_in;
//printf("class : %d\n",_class[i]);
for (j=0; j<lenOfTimeSeries; j++)
{
in_file >> data_in;
_data[i*lenOfTimeSeries+j] = data_in;
//printf("%f, ",_data[i*lenOfTimeSeries+j]);
}
//printf("\n");
}
in_file.close();
}
////////////////////////////////////////////////////////////////
__device__ void normalize(float* d_data, float mean, float stdev, uint64_t t, float* norm_data, const int L)
{
int i = 0;
for(i=0; i<L; i++)
{
norm_data[i] = (d_data[t+i]-mean)/stdev;
}
}
////////////////////////////////////////////////////////////////
__global__ void Euclidean_Distance(float* trainingData, float* testData, float* output, int length)
{
int i;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float temp[1024];
float sum = 0, sum_sqr = 0, mean = 0, mean_sqr = 0, variance = 0, std_dev = 0;
int t = length*(idx/length) + idx%length;
for(i=t; i<t+length; i++)
{
sum += trainingData[i];
sum_sqr += trainingData[i] * trainingData[i];
}
mean = sum / length;
mean_sqr = mean*mean;
variance = (sum_sqr/length) - mean_sqr;
std_dev = sqrt(variance);
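    // z-normalize this training subsequence, then accumulate its squared distance to the test series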
i = 0;
for(; i<length; i++)
temp[i] = (trainingData[t+i]-mean) / std_dev;
float errorSummation = 0;
for(i=0; i < length; i++)
{
errorSummation += (temp[i] - testData[i])*(temp[i] - testData[i]);
}
errorSummation = sqrt(errorSummation);
output[idx] = errorSummation;
}
int main(int argc, char * argv[])
{
clock_t start, end;
fprintf(stderr, "Initializing ... \n");
char* inputFileName = argv[1];
int isDefault = 0;
if(!inputFileName){
printf("No test file provided. Using default file : SonyAIBORobotSurface_TRAIN\n");
inputFileName = "SonyAIBORobotSurface_TRAIN";
isDefault = 1;
}
    if(argc > 2){ // argv[2] (number of time series) must exist before it is read
noOfTimeSeries = atoi(argv[2]);
}else{
if(isDefault == 0){
printf("Number of time series not provided. Exiting\n");
exit(0);
}
}
    if(argc > 3){ // argv[3] (length of time series) must exist before it is read
lenOfTimeSeries = atoi(argv[3]);
}
else{
if(isDefault == 0){
printf("Length of time series not provided. Exiting\n");
exit(0);
}
}
uint64_t train_size = noOfTimeSeries * lenOfTimeSeries * sizeof(float);
uint64_t test_size;// = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float);
//storage allocation for train data and train class labels
float* train_data = (float*) malloc(train_size);
int* train_class = (int *) malloc(noOfTimeSeries*sizeof(int));
//storage allocation for test data and test class labels
float* test_data;// = (float*) malloc (test_size);
int* test_class;// = (int *) malloc(noOfTestTimeSeries * sizeof(int));
//get training file
printf("Reading train file\n");
//read training file
readfile(inputFileName, train_data, train_class, noOfTimeSeries);
printf("===================================================\n");
printf("Training File : %s\n",inputFileName);
printf("Number of Time Series : %d\n",noOfTimeSeries);
printf("Length of Time Series : %d\n",lenOfTimeSeries);
// If Testing File is provided
if(argc == 6 || isDefault == 1){
char* testFileName;
if(isDefault == 0){
testFileName = argv[4];
noOfTestTimeSeries = atoi(argv[5]);
}else{
testFileName = "SonyAIBORobotSurface_TEST";
noOfTestTimeSeries = 601;
}
printf("-----------------------------------------------------\n");
//get testing file
printf("Reading test file\n");
test_size = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float);
test_data = (float*) malloc (test_size);
test_class = (int *) malloc(noOfTestTimeSeries * sizeof(int));
//read test file
readfile(testFileName, test_data, test_class, noOfTestTimeSeries);
printf("Testing File : %s\n",testFileName);
printf("Number of Time Series to validate: %d\n",noOfTestTimeSeries);
}
printf("===================================================\n");
cudaProfilerStart();
//GPU number present in the system
int noOfGPUs;
checkCudaErrors(cudaGetDeviceCount(&noOfGPUs));
printf("Total GPUs on System : %d\n", noOfGPUs);
int threadsPerBlock = min((int)ceil(lenOfTimeSeries/(float)32)*32,1024);
int noOfBlocks = ceil((noOfTimeSeries*lenOfTimeSeries)/(float)threadsPerBlock);
printf("noOfBlocks %d threadsPerBlock %d\n",noOfBlocks ,threadsPerBlock);
//allocate memory for subsequences
float* subseq = (float*)malloc(train_size);
//allocate memory on device and copy required data to GPU memory
float* d_subseq = 0;
cudaMalloc((void**)&d_subseq, train_size);
float* d_test_series = 0;
cudaMalloc((void**)&d_test_series,lenOfTimeSeries*sizeof(float));
float* d_train_data = 0;
cudaMalloc((void**)&d_train_data, train_size);
cudaMemcpy(d_train_data, train_data, train_size, cudaMemcpyHostToDevice);
start = clock();
int i = 0;
int errorCount = 0 , minIndex = -1;
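    // 1-NN classification: for each test series, compute distances to all training series on the GPU and pick the nearest neighbour's class on the host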
for (i=0;i < noOfTestTimeSeries;i++)
{
cudaMemcpy(d_test_series, test_data+(lenOfTimeSeries*i), lenOfTimeSeries*sizeof(float), cudaMemcpyHostToDevice);
Euclidean_Distance<<<noOfBlocks, threadsPerBlock>>>(d_train_data, d_test_series, d_subseq, lenOfTimeSeries);
cudaThreadSynchronize();
cudaMemcpy(subseq, d_subseq, train_size, cudaMemcpyDeviceToHost);
float minDistance = FLT_MAX;
minIndex = -1;
int j = 0;
for(; j < noOfTimeSeries ; j++ )
{
if ( minDistance > subseq[j*lenOfTimeSeries] )
{
minDistance = subseq[j*lenOfTimeSeries];
minIndex = j;
}
}
if( train_class[minIndex] != test_class[i] )
errorCount++;
//printf("%d\t%d\t %d\t%d\t%3.6f\n",i , test_class[i] ,train_class[minIndex], minIndex , minDistance );
}
cudaFree(d_train_data);
cudaFree(d_subseq);
cudaFree(d_test_series);
free(train_class);
free(test_class);
end = clock() - start;
double endtime = (double)end / ((double)CLOCKS_PER_SEC);
printf("Total Time GPU : %f\n", endtime);
printf("Accuracy is %f\n",(float)(noOfTestTimeSeries-errorCount)*(100.0/noOfTestTimeSeries));
cudaProfilerStop();
return 0;
}
|
7f303357a05c2a03d7f6726f966cc5ddc58b8022.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
void modified_bessel_i0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_i0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_cuda);
} // namespace at::native
| 7f303357a05c2a03d7f6726f966cc5ddc58b8022.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
void modified_bessel_i0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_i0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_cuda);
} // namespace at::native
|
f2f9f505671f247e43c45cd1016f8617e4a26ba3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Image.h"
#include "Kernel.h"
#include "KernelFactory.h"
#include "CUDAError.h"
#include "filteringUtils.h"
#include "speedTests.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <sstream>
#include <cstdio>
#include <ctime>
#include <hiprand/hiprand_kernel.h>
#include <omp.h>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define TILE_WIDTH 32
#define w (TILE_WIDTH + 3 - 1)
#define KERNEL_SIZE 3
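// w = TILE_WIDTH + KERNEL_SIZE - 1, i.e. an output tile plus the filter halo (presumably the shared-memory tile width used by the tiling kernel)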
double CUDAWithTiling(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA tiling filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
float* identity = kernel->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
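    // the filtered image loses kernelSize/2 pixels on each border (no padding)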
float* result = new float[widthResult * heightResult * channels];
    // Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* resultDevice;
CUDA_CHECK_RETURN(hipMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(hipMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
    // Copy the matrices to the device
CUDA_CHECK_RETURN(hipMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
dim3 blockDim(TILE_WIDTH, TILE_WIDTH);
dim3 gridDim(ceil(((float) widthResult) / TILE_WIDTH), ceil(((float) heightResult) / TILE_WIDTH));
    // Kernel invocation
hipLaunchKernelGGL(( tiling), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_tiling_" + filterName + ".ppm");
hipFree(pixelsDevice);
hipFree(identityDevice);
hipFree(resultDevice);
delete [] pixels;
delete [] identity;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CUDAWithTiling(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA tiling filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernelI = kf->createKernel(kernelSize, "identity");
float* identity = kernelI->getFilter();
Kernel* kernelB = kf->createKernel(kernelSize, "gauss");
float* blur = kernelB->getFilter();
Kernel* kernelBB = kf->createKernel(kernelSize, "box");
float* boxBlur = kernelBB->getFilter();
Kernel* kernelE = kf->createKernel(kernelSize, "edges");
float* edge = kernelE->getFilter();
Kernel* kernelS = kf->createKernel(kernelSize, "sharpen");
float* sharpen = kernelS->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
float* result = new float[widthResult * heightResult * channels];
    // Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* blurDevice;
float* boxBlurDevice;
float* edgeDevice;
float* sharpenDevice;
float* resultDevice;
CUDA_CHECK_RETURN(hipMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(hipMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&blurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&boxBlurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&edgeDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&sharpenDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
    // Copy the matrices to the device
CUDA_CHECK_RETURN(hipMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(blurDevice, blur, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(boxBlurDevice, boxBlur, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(edgeDevice, edge, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(sharpenDevice, sharpen, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
    // Choose the grid size and the size of each block
dim3 blockDim(TILE_WIDTH, TILE_WIDTH);
dim3 gridDim(ceil(((float) widthResult) / TILE_WIDTH), ceil(((float) heightResult) / TILE_WIDTH));
    // Invoke the kernel once per filter
hipLaunchKernelGGL(( tiling), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_tiling_identity.ppm");
hipLaunchKernelGGL(( tiling), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, blurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_tiling_blur.ppm");
hipLaunchKernelGGL(( tiling), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, boxBlurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_tiling_box_blur.ppm");
hipLaunchKernelGGL(( tiling), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, edgeDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_tiling_edge.ppm");
hipLaunchKernelGGL(( tiling), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, sharpenDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_sharpen.ppm");
hipFree(pixelsDevice);
hipFree(identityDevice);
hipFree(blurDevice);
hipFree(boxBlurDevice);
hipFree(edgeDevice);
hipFree(sharpenDevice);
hipFree(resultDevice);
delete [] pixels;
delete [] identity;
delete [] blur;
delete [] boxBlur;
delete [] edge;
delete [] sharpen;
delete [] result;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CUDANaive(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
float* identity = kernel->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
float* result = new float[widthResult * heightResult * channels];
    // Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* resultDevice;
CUDA_CHECK_RETURN(hipMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(hipMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
    // Copy the matrices to the device
CUDA_CHECK_RETURN(hipMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
dim3 blockDim(32, 32);
dim3 gridDim(ceil(((float) widthResult) / blockDim.x), ceil(((float) heightResult) / blockDim.y));
    // Kernel invocation
hipLaunchKernelGGL(( naiveFiltering), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_naive_" + filterName + ".ppm");
hipFree(pixelsDevice);
hipFree(identityDevice);
hipFree(resultDevice);
delete [] pixels;
delete [] identity;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CUDANaive(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernelI = kf->createKernel(kernelSize, "identity");
float* identity = kernelI->getFilter();
Kernel* kernelB = kf->createKernel(kernelSize, "gauss");
float* blur = kernelB->getFilter();
Kernel* kernelBB = kf->createKernel(kernelSize, "box");
float* boxBlur = kernelBB->getFilter();
Kernel* kernelE = kf->createKernel(kernelSize, "edges");
float* edge = kernelE->getFilter();
Kernel* kernelS = kf->createKernel(kernelSize, "sharpen");
float* sharpen = kernelS->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
float* result = new float[widthResult * heightResult * channels];
    // Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* blurDevice;
float* boxBlurDevice;
float* edgeDevice;
float* sharpenDevice;
float* resultDevice;
CUDA_CHECK_RETURN(hipMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(hipMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&blurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&boxBlurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&edgeDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&sharpenDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(hipMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
    // Copy the matrices to the device
CUDA_CHECK_RETURN(hipMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(blurDevice, blur, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(boxBlurDevice, boxBlur, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(edgeDevice, edge, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(sharpenDevice, sharpen, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice));
    // Choose the grid size and the size of each block
dim3 blockDim(32, 32);
dim3 gridDim(ceil(((float) widthResult) / blockDim.x), ceil(((float) heightResult) / blockDim.y));
    // Invoke the kernel once per filter
hipLaunchKernelGGL(( naiveFiltering), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_naive_identity.ppm");
hipLaunchKernelGGL(( naiveFiltering), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, blurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_blur.ppm");
hipLaunchKernelGGL(( naiveFiltering), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, boxBlurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_box_blur.ppm");
hipLaunchKernelGGL(( naiveFiltering), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, edgeDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_edge.ppm");
hipLaunchKernelGGL(( naiveFiltering), dim3(gridDim), dim3(blockDim), 0, 0, pixelsDevice, sharpenDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
hipMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_sharpen.ppm");
hipFree(pixelsDevice);
hipFree(identityDevice);
hipFree(blurDevice);
hipFree(boxBlurDevice);
hipFree(edgeDevice);
hipFree(sharpenDevice);
hipFree(resultDevice);
delete [] pixels;
delete [] identity;
delete [] blur;
delete [] boxBlur;
delete [] edge;
delete [] sharpen;
delete [] result;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CPPNaive(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "C++ sequential naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
std::stringstream path;
path << "../images/" << kernel->getType() << kernelSize << ".ppm";
std::string s = path.str();
(kernel->applyFiltering(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
delete kernel;
kernels.clear();
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
return duration;
}
double CPPNaive(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "C++ sequential naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
for (auto &kernel : kernels) {
std::stringstream path;
path << "../images/sequential_" << kernel->getType() << ".ppm";
std::string s = path.str();
(kernel->applyFiltering(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
}
for (auto &kernel : kernels) {
delete(kernel);
}
kernels.clear();
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
return duration;
}
double filteringOpenMP(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "OpenMP filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
auto start = omp_get_wtime();
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
std::stringstream path;
path << "../images/openMP_" << kernel->getType() << kernelSize << ".ppm";
std::string s = path.str();
(kernel->applyFilteringOpenMP(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
delete kernel;
kernels.clear();
double duration = (omp_get_wtime() - start);
return duration;
}
double filteringOpenMP(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "OpenMP filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
auto start = omp_get_wtime();
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
for (auto &kernel : kernels) {
std::stringstream path;
path << "../images/openMP_" << kernel->getType() << ".ppm";
std::string s = path.str();
(kernel->applyFilteringOpenMP(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
}
for (auto &kernel : kernels) {
delete(kernel);
}
kernels.clear();
double duration = (omp_get_wtime() - start);
return duration;
}
| f2f9f505671f247e43c45cd1016f8617e4a26ba3.cu | #include "Image.h"
#include "Kernel.h"
#include "KernelFactory.h"
#include "CUDAError.h"
#include "filteringUtils.h"
#include "speedTests.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <sstream>
#include <cstdio>
#include <ctime>
#include <curand_mtgp32_kernel.h>
#include <omp.h>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define TILE_WIDTH 32
#define w (TILE_WIDTH + 3 - 1)
#define KERNEL_SIZE 3
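// w = TILE_WIDTH + KERNEL_SIZE - 1, i.e. an output tile plus the filter halo (presumably the shared-memory tile width used by the tiling kernel)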
double CUDAWithTiling(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA tiling filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
float* identity = kernel->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
float* result = new float[widthResult * heightResult * channels];
    // Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* resultDevice;
CUDA_CHECK_RETURN(cudaMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(cudaMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
    // Copy the matrices to the device
CUDA_CHECK_RETURN(cudaMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
dim3 blockDim(TILE_WIDTH, TILE_WIDTH);
dim3 gridDim(ceil(((float) widthResult) / TILE_WIDTH), ceil(((float) heightResult) / TILE_WIDTH));
    // Kernel invocation
tiling<<<gridDim, blockDim>>>(pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_tiling_" + filterName + ".ppm");
cudaFree(pixelsDevice);
cudaFree(identityDevice);
cudaFree(resultDevice);
delete [] pixels;
delete [] identity;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CUDAWithTiling(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA tiling filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernelI = kf->createKernel(kernelSize, "identity");
float* identity = kernelI->getFilter();
Kernel* kernelB = kf->createKernel(kernelSize, "gauss");
float* blur = kernelB->getFilter();
Kernel* kernelBB = kf->createKernel(kernelSize, "box");
float* boxBlur = kernelBB->getFilter();
Kernel* kernelE = kf->createKernel(kernelSize, "edges");
float* edge = kernelE->getFilter();
Kernel* kernelS = kf->createKernel(kernelSize, "sharpen");
float* sharpen = kernelS->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
float* result = new float[widthResult * heightResult * channels];
    // Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* blurDevice;
float* boxBlurDevice;
float* edgeDevice;
float* sharpenDevice;
float* resultDevice;
CUDA_CHECK_RETURN(cudaMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(cudaMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&blurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&boxBlurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&edgeDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&sharpenDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
    // Copy the matrices to the device
CUDA_CHECK_RETURN(cudaMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(blurDevice, blur, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(boxBlurDevice, boxBlur, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(edgeDevice, edge, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(sharpenDevice, sharpen, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
// Choose the grid size and the size of each block
dim3 blockDim(TILE_WIDTH, TILE_WIDTH);
dim3 gridDim(ceil(((float) widthResult) / TILE_WIDTH), ceil(((float) heightResult) / TILE_WIDTH));
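// Worked example (assuming TILE_WIDTH == 32 and a 1024x1024 input with kernelSize == 5):
// widthResult = heightResult = 1020, so gridDim = (ceil(1020/32.0), ceil(1020/32.0)) = (32, 32);
// edge blocks contain threads that fall outside the result and must be guarded inside the kernel.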
// Launch the kernel once per filter
tiling<<<gridDim, blockDim>>>(pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_tiling_identity.ppm");
tiling<<<gridDim, blockDim>>>(pixelsDevice, blurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_tiling_blur.ppm");
tiling<<<gridDim, blockDim>>>(pixelsDevice, boxBlurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_tiling_box_blur.ppm");
tiling<<<gridDim, blockDim>>>(pixelsDevice, edgeDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_tiling_edge.ppm");
tiling<<<gridDim, blockDim>>>(pixelsDevice, sharpenDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_sharpen.ppm");
cudaFree(pixelsDevice);
cudaFree(identityDevice);
cudaFree(blurDevice);
cudaFree(boxBlurDevice);
cudaFree(edgeDevice);
cudaFree(sharpenDevice);
cudaFree(resultDevice);
delete [] pixels;
delete [] identity;
delete [] blur;
delete [] boxBlur;
delete [] edge;
delete [] sharpen;
delete [] result;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CUDANaive(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
float* identity = kernel->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
float* result = new float[widthResult * heightResult * channels];
// Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* resultDevice;
CUDA_CHECK_RETURN(cudaMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(cudaMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
// Copy the matrices to the device
CUDA_CHECK_RETURN(cudaMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
dim3 blockDim(32, 32);
dim3 gridDim(ceil(((float) widthResult) / blockDim.x), ceil(((float) heightResult) / blockDim.y));
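// 32x32 = 1024 threads per block, the maximum block size supported by current CUDA GPUs;
// the grid is rounded up so every output pixel is covered by at least one thread.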
// Launch the kernel
naiveFiltering<<<gridDim, blockDim>>>(pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_naive_" + filterName + ".ppm");
cudaFree(pixelsDevice);
cudaFree(identityDevice);
cudaFree(resultDevice);
delete [] pixels;
delete [] identity;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CUDANaive(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "CUDA naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
float* pixels = img->getPixels();
int width = img->getWidth();
int height = img->getHeight();
int channels = img->getChannels();
auto* kf = new KernelFactory();
Kernel* kernelI = kf->createKernel(kernelSize, "identity");
float* identity = kernelI->getFilter();
Kernel* kernelB = kf->createKernel(kernelSize, "gauss");
float* blur = kernelB->getFilter();
Kernel* kernelBB = kf->createKernel(kernelSize, "box");
float* boxBlur = kernelBB->getFilter();
Kernel* kernelE = kf->createKernel(kernelSize, "edges");
float* edge = kernelE->getFilter();
Kernel* kernelS = kf->createKernel(kernelSize, "sharpen");
float* sharpen = kernelS->getFilter();
int widthResult = width - (kernelSize/2) * 2;
int heightResult = height - (kernelSize/2) * 2;
float* result = new float[widthResult * heightResult * channels];
// Allocate memory on the device
float* pixelsDevice;
float* identityDevice;
float* blurDevice;
float* boxBlurDevice;
float* edgeDevice;
float* sharpenDevice;
float* resultDevice;
CUDA_CHECK_RETURN(cudaMalloc((void **)&pixelsDevice, sizeof(float) * width * height * channels));
CUDA_CHECK_RETURN(cudaMalloc((void **)&identityDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&blurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&boxBlurDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&edgeDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&sharpenDevice, sizeof(float) * kernelSize * kernelSize));
CUDA_CHECK_RETURN(cudaMalloc((void **)&resultDevice, sizeof(float) * widthResult * heightResult * channels));
// Copy the matrices to the device
CUDA_CHECK_RETURN(cudaMemcpy(pixelsDevice, pixels, width * height * channels * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(identityDevice, identity, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(blurDevice, blur, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(boxBlurDevice, boxBlur, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(edgeDevice, edge, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(sharpenDevice, sharpen, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice));
// Choose the grid size and the size of each block
dim3 blockDim(32, 32);
dim3 gridDim(ceil(((float) widthResult) / blockDim.x), ceil(((float) heightResult) / blockDim.y));
// Launch the kernel once per filter
naiveFiltering<<<gridDim, blockDim>>>(pixelsDevice, identityDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
Image* newImage = new Image(result, widthResult, heightResult, 255, channels, img->getMagic());
newImage->storeImage("../images/cuda_naive_identity.ppm");
naiveFiltering<<<gridDim, blockDim>>>(pixelsDevice, blurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_blur.ppm");
naiveFiltering<<<gridDim, blockDim>>>(pixelsDevice, boxBlurDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_box_blur.ppm");
naiveFiltering<<<gridDim, blockDim>>>(pixelsDevice, edgeDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_edge.ppm");
naiveFiltering<<<gridDim, blockDim>>>(pixelsDevice, sharpenDevice, resultDevice, width,
height, kernelSize, widthResult, heightResult, channels);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(result, resultDevice, sizeof(float) * widthResult * heightResult * channels,
cudaMemcpyDeviceToHost));
newImage->setPixels(result);
newImage->storeImage("../images/cuda_naive_sharpen.ppm");
cudaFree(pixelsDevice);
cudaFree(identityDevice);
cudaFree(blurDevice);
cudaFree(boxBlurDevice);
cudaFree(edgeDevice);
cudaFree(sharpenDevice);
cudaFree(resultDevice);
delete [] pixels;
delete [] identity;
delete [] blur;
delete [] boxBlur;
delete [] edge;
delete [] sharpen;
delete [] result;
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
printf("# pixels totali immagine nuova: %d\n", widthResult * heightResult);
printf("gridDim: %d, %d\n", gridDim.x, gridDim.y);
printf("blockDim: %d, %d\n", blockDim.x, blockDim.y);
printf("# blocchi: %d\n", gridDim.x * gridDim.y);
printf("Threads per blocco: %d\n", blockDim.x * blockDim.y);
printf("Threads totali: %d\n", blockDim.x * blockDim.y * gridDim.x * gridDim.y);
return duration;
}
double CPPNaive(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "C++ sequential naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
std::stringstream path;
path << "../images/" << kernel->getType() << kernelSize << ".ppm";
std::string s = path.str();
(kernel->applyFiltering(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
delete kernel;
kernels.clear();
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
return duration;
}
double CPPNaive(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "C++ sequential naive filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
std::clock_t start;
start = std::clock();
double duration;
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
for (auto &kernel : kernels) {
std::stringstream path;
path << "../images/sequential_" << kernel->getType() << ".ppm";
std::string s = path.str();
(kernel->applyFiltering(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
}
for (auto &kernel : kernels) {
delete(kernel);
}
kernels.clear();
duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
return duration;
}
double filteringOpenMP(int kernelSize, std::string imagePath, std::string filterName) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "OpenMP filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
auto start = omp_get_wtime();
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
Kernel* kernel = kf->createKernel(kernelSize, filterName);
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
std::stringstream path;
path << "../images/openMP_" << kernel->getType() << kernelSize << ".ppm";
std::string s = path.str();
(kernel->applyFilteringOpenMP(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
delete kernel;
kernels.clear();
double duration = (omp_get_wtime() - start);
return duration;
}
double filteringOpenMP(int kernelSize, std::string imagePath) {
std::cout << "" << std::endl;
std::cout << "" << std::endl;
std::cout << "OpenMP filtering" << std::endl;
std::cout << "Starting clock..." << std::endl;
auto start = omp_get_wtime();
Image* img = new Image(imagePath);
auto* kf = new KernelFactory();
std::vector<Kernel *> kernels = kf->createAllKernels(kernelSize);
for (auto &kernel : kernels) {
std::stringstream path;
path << "../images/openMP_" << kernel->getType() << ".ppm";
std::string s = path.str();
(kernel->applyFilteringOpenMP(img->getPixels(), img->getWidth(), img->getHeight(), img->getChannels(),
img->getMagic()))->storeImage(s);
}
for (auto &kernel : kernels) {
delete(kernel);
}
kernels.clear();
double duration = (omp_get_wtime() - start);
return duration;
} |
6220599c0803f73a30835b73a33faf0c0866c6ae.hip | // !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------
#pragma hdrstop
#include "gridrec.h"
#include <sys/time.h>
//---------------------------------------------------------------------------
#pragma package(smart_init)
#ifdef USE_GRIDREC_GPU
texture<float2, 2, hipReadModeElementType> tex_cproj_res;
#endif
//---------------------------------------------------------------------------
GridRec::GridRec (void)
{
pswf_db[0].C = 4.0;
pswf_db[0].lmbda = 0.99588549;
pswf_db[0].nt = 16;
pswf_db[0].coefs[0] = 0.5239891E+01;
pswf_db[0].coefs[1] = -0.5308499E+01;
pswf_db[0].coefs[2] = 0.1184591E+01;
pswf_db[0].coefs[3] = -0.1230763E-00;
pswf_db[0].coefs[4] = 0.7371623E-02;
pswf_db[0].coefs[5] = -0.2864074E-03;
pswf_db[0].coefs[6] = 0.7789983E-05;
pswf_db[0].coefs[7] = -0.1564700E-06;
pswf_db[0].coefs[8] = 0.2414647E-08;
pswf_db[0].coefs[9] = 0.0;
pswf_db[0].coefs[10] = 0.0;
pswf_db[0].coefs[11] = 0.0;
pswf_db[0].coefs[12] = 0.0;
pswf_db[0].coefs[13] = 0.0;
pswf_db[0].coefs[14] = 0.0;
pswf_db[1].C = 4.2;
pswf_db[1].lmbda = 0.99657887;
pswf_db[1].nt = 16;
pswf_db[1].coefs[0] = 0.6062942E+01;
pswf_db[1].coefs[1] = -0.6450252E+01;
pswf_db[1].coefs[2] = 0.1551875E+01;
pswf_db[1].coefs[3] = -0.1755960E-01;
pswf_db[1].coefs[4] = 0.1150712E-01;
pswf_db[1].coefs[5] = -0.4903653E-03;
pswf_db[1].coefs[6] = 0.1464986E-04;
pswf_db[1].coefs[7] = -0.3235110E-06;
pswf_db[1].coefs[8] = 0.5492141E-08;
pswf_db[1].coefs[9] = 0.0;
pswf_db[1].coefs[10] = 0.0;
pswf_db[1].coefs[11] = 0.0;
pswf_db[1].coefs[12] = 0.0;
pswf_db[1].coefs[13] = 0.0;
pswf_db[1].coefs[14] = 0.0;
pswf_db[2].C = 5.0;
pswf_db[2].lmbda = 0.99935241;
pswf_db[2].nt = 18;
pswf_db[2].coefs[0] = 0.1115509E+02;
pswf_db[2].coefs[1] = -0.1384861E+02;
pswf_db[2].coefs[2] = 0.4289811E+01;
pswf_db[2].coefs[3] = -0.6514303E-00;
pswf_db[2].coefs[4] = 0.5844993E-01;
pswf_db[2].coefs[5] = -0.3447736E-02;
pswf_db[2].coefs[6] = 0.1435066E-03;
pswf_db[2].coefs[7] = -0.4433680E-05;
pswf_db[2].coefs[8] = 0.1056040E-06;
pswf_db[2].coefs[9] = -0.1997173E-08;
pswf_db[2].coefs[10] = 0.0;
pswf_db[2].coefs[11] = 0.0;
pswf_db[2].coefs[12] = 0.0;
pswf_db[2].coefs[13] = 0.0;
pswf_db[2].coefs[14] = 0.0;
pswf_db[3].C = 6.0;
pswf_db[3].lmbda = 0.9990188;
pswf_db[3].nt = 18;
pswf_db[3].coefs[0] = 0.2495593E+02;
pswf_db[3].coefs[1] = -0.3531124E+02;
pswf_db[3].coefs[2] = 0.1383722E+02;
pswf_db[3].coefs[3] = -0.2799028E+01;
pswf_db[3].coefs[4] = 0.3437217E-00;
pswf_db[3].coefs[5] = -0.2818024E-01;
pswf_db[3].coefs[6] = 0.1645842E-02;
pswf_db[3].coefs[7] = -0.7179160E-04;
pswf_db[3].coefs[8] = 0.2424510E-05;
pswf_db[3].coefs[9] = -0.6520875E-07;
pswf_db[3].coefs[10] = 0.0;
pswf_db[3].coefs[11] = 0.0;
pswf_db[3].coefs[12] = 0.0;
pswf_db[3].coefs[13] = 0.0;
pswf_db[3].coefs[14] = 0.0;
pswf_db[4].C = 7.0;
pswf_db[4].lmbda = 0.99998546;
pswf_db[4].nt = 20;
pswf_db[4].coefs[0] = 0.5767616E+02;
pswf_db[4].coefs[1] = -0.8931343E+02;
pswf_db[4].coefs[2] = 0.4167596E+02;
pswf_db[4].coefs[3] = -0.1053599E+02;
pswf_db[4].coefs[4] = 0.1662374E+01;
pswf_db[4].coefs[5] = -0.1780527E-00;
pswf_db[4].coefs[6] = 0.1372983E-01;
pswf_db[4].coefs[7] = -0.7963169E-03;
pswf_db[4].coefs[8] = 0.3593372E-04;
pswf_db[4].coefs[9] = -0.1295941E-05;
pswf_db[4].coefs[10] = 0.3817796E-07;
pswf_db[4].coefs[11] = 0.0;
pswf_db[4].coefs[12] = 0.0;
pswf_db[4].coefs[13] = 0.0;
pswf_db[4].coefs[14] = 0.0;
num_sinograms_needed = 2;
SINE = NULL;
COSE = NULL;
cproj = NULL;
filphase = NULL;
wtbl = NULL;
#ifdef INTERP
dwtbl = NULL;
#endif
winv = NULL;
work = NULL;
H = NULL;
G1 = NULL;
G2 = NULL;
S1 = NULL;
S2 = NULL;
#ifdef USE_GRIDREC_GPU
// GPU
data_H = NULL;
d_data_H = NULL;
deviceID = 0;
#endif // USE_GRIDREC_GPU
}
//---------------------------------------------------------------------------
// void GridRec::acknowledgements (LogFileClass *acknowledge_file)
// {
// acknowledge_file->Message ("__________________________________________________________________");
// acknowledge_file->Message ("GridRec class");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("Class for performing reconstructions based on the \"GridRec\" algorythm.");
// acknowledge_file->Message ("Origional source code developed in C by:");
// acknowledge_file->Message (" Still trying to find out who--there were no comments in the code ");
// acknowledge_file->Message ("Developed and Maintained by:");
// acknowledge_file->Message (" Brian Tieman & Francesco DeCarlo");
// acknowledge_file->Message (" Argonne National Laboratory");
// acknowledge_file->Message (" [email protected]");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("8/20/2003 V1.0 BT First version with acknowledgements");
// acknowledge_file->Message ("8/20/2003 V1.0 BT Ported C code to a CPP object structure");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("__________________________________________________________________");
// }
//---------------------------------------------------------------------------
void GridRec::setSinoAndReconBuffers (int number, float *sinogram_address, float *reconstruction_address)
{
int loop;
if (G1 == NULL)
G1 = (float **) malloc((size_t) (theta_list_size * sizeof(float *)));
if (G2 == NULL)
G2 = (float **) malloc((size_t) (theta_list_size * sizeof(float *)));
if (S1 == NULL)
S1 = (float **) malloc((size_t) (imgsiz * sizeof(float *)));
if (S2 == NULL)
S2 = (float **) malloc((size_t) (imgsiz * sizeof(float *)));
if (number == 1)
{
sinogram1 = sinogram_address;
reconstruction1 = reconstruction_address;
for (loop=0;loop<theta_list_size;loop++)
G1[loop] = &sinogram1[loop*sinogram_x_dim];
for (loop=0;loop<imgsiz;loop++)
S1[loop] = &reconstruction1[loop*sinogram_x_dim];
}
if (number == 2)
{
sinogram2 = sinogram_address;
reconstruction2 = reconstruction_address;
for (loop=0;loop<theta_list_size;loop++)
G2[loop] = &sinogram2[loop*sinogram_x_dim];
for (loop=0;loop<imgsiz;loop++)
S2[loop] = &reconstruction2[loop*sinogram_x_dim];
}
}
//---------------------------------------------------------------------------
void GridRec::init (void)
{
float center,
C,
MaxPixSiz,
R,
D0,
D1;
long itmp;
pswf_struct *pswf;
center = sinogram_x_dim / 2;
sampl = 1.0;
MaxPixSiz = 1.0;
R = 1.0;
X0 = 0.0;
Y0 = 0.0;
ltbl = 512;
get_pswf (6.0, &pswf);
C = pswf->C;
if (X0!=0.0||Y0!=0.0)
flag = 1;
else
flag = 0;
pdim = 1;
itmp = sinogram_x_dim-1;
while (itmp)
{
pdim<<=1;
itmp>>=1;
}
D0 = R*sinogram_x_dim;
D1 = sampl*D0;
M = 1;
itmp = (long int) (D1/MaxPixSiz-1);
while (itmp)
{
M<<=1;
itmp>>=1;
}
M02 = (long int) (floor(M/2/sampl-0.5));
M0 = 2*M02+1;
sampl = (float)M/M0;
D1 = sampl*D0;
L = 2*C*sampl/PI;
scale = D1/pdim;
cproj = (complex_struct *) malloc ((pdim+1) * sizeof(complex_struct));
filphase = (complex_struct *) malloc (((pdim/2)+1) * sizeof(complex_struct));
wtbl = (float *) malloc ((ltbl+1) * sizeof(float));
#ifdef INTERP
dwtbl = (float *) malloc ((ltbl+1) * sizeof(float));
#endif
winv = (float *) malloc (M0 * sizeof(float));
work = (float *) malloc (((int) L+1) * sizeof(float));
H = (complex_struct *) malloc ((M+1)*(M+1)*sizeof(complex_struct));
SINE = (float *) malloc (theta_list_size * sizeof (float));
COSE = (float *) malloc (theta_list_size * sizeof (float));
trig_su (0, theta_list_size);
filphase_su (pdim, center, filphase);
pswf_su (pswf, ltbl, M02, wtbl, dwtbl, winv);
imgsiz = M0;
#ifdef USE_GRIDREC_GPU
// GPU
hipSetDevice( deviceID );
// printf("Use CUDA device %d\n", deviceID);
//
data_H = new hipfftComplex[ M * M ];
hipMalloc( (void**)&d_data_H, sizeof( hipfftComplex ) * M * M );
hipfftResult res = hipfftPlan2d( &plan_ifft2, M, M, HIPFFT_C2C );
if( res != HIPFFT_SUCCESS )
printf("hipfftPlan2d failed\n ");
#endif // USE_GRIDREC_GPU
}
//---------------------------------------------------------------------------
void GridRec::reconstruct (void)
{
memset (H, 0, (M+1)*(M+1)*sizeof(complex_struct));
// #ifdef USE_GRIDREC_GPU
// float timePhase1;
// unsigned int timerPhase1 = 0; // test
// CUT_SAFE_CALL( cutCreateTimer( &timerPhase1 ) );
// CUT_SAFE_CALL( cutStartTimer( timerPhase1 ) );
// #endif // USE_GRIDREC_GPU
phase1 ();
// #ifdef USE_GRIDREC_GPU
// CUT_SAFE_CALL(cutStopTimer(timerPhase1)); // test
// timePhase1 = cutGetTimerValue(timerPhase1);
// CUT_SAFE_CALL(cutDeleteTimer(timerPhase1));
// printf("total time for Phase1 is %f ms \n", timePhase1);
//
// float timePhase2;
// unsigned int timerPhase2 = 0; // test
// CUT_SAFE_CALL( cutCreateTimer( &timerPhase2 ) );
// CUT_SAFE_CALL( cutStartTimer( timerPhase2 ) );
// #endif
phase2 ();
// #ifdef USE_GRIDREC_GPU
// CUT_SAFE_CALL(cutStopTimer(timerPhase2)); // test
// timePhase2 = cutGetTimerValue(timerPhase2);
// CUT_SAFE_CALL(cutDeleteTimer(timerPhase2));
// printf("total time for Phase2 is %f ms \n", timePhase2);
// //
// float timePhase3;
// unsigned int timerPhase3 = 0; // test
// CUT_SAFE_CALL( cutCreateTimer( &timerPhase3 ) );
// CUT_SAFE_CALL( cutStartTimer( timerPhase3 ) );
// #endif
phase3 ();
// #ifdef USE_GRIDREC_GPU
// CUT_SAFE_CALL(cutStopTimer(timerPhase3)); // test
// timePhase3 = cutGetTimerValue(timerPhase3);
// CUT_SAFE_CALL(cutDeleteTimer(timerPhase3));
// printf("total time for Phase3 is %f ms \n", timePhase3);
// #endif
return;
}
//---------------------------------------------------------------------------
void GridRec::destroy (void)
{
if (SINE != NULL)
free (SINE);
if (COSE != NULL)
free (COSE);
if (cproj != NULL)
free (cproj);
if (filphase != NULL)
free (filphase);
if (wtbl != NULL)
free (wtbl);
#ifdef INTERP
if (dwtbl != NULL)
free (dwtbl);
#endif
if (winv != NULL)
free (winv);
if (work != NULL)
free (work);
if (H != NULL)
free (H);
if (G1 != NULL)
free (G1);
if (G2 != NULL)
free (G2);
if (S1 != NULL)
free (S1);
if (S2 != NULL)
free (S2);
//
#ifdef USE_GRIDREC_GPU
// GPU
hipfftDestroy( plan_ifft2 );
hipFree( d_data_H );
delete [] data_H;
#endif // USE_GRIDREC_GPU
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
//Private Methods
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
void GridRec::phase1 (void)
{
/***Phase 1 ***************************************
Loop over the n_ang projection angles. For each angle, do
the following:
1. Copy the real projection data from the two slices into the
real and imaginary parts of the first n_det elements of the
complex array, cproj[]. Set the remaining pdim-n_det elements
to zero (zero-padding).
2. Carry out a (1D) Fourier transform on the complex data.
This results in transform data that is arranged in
"wrap-around" order, with non-negative spatial frequencies
occupying the first half, and negative frequencies the second
half, of the array, cproj[].
3. Multiply each element of the 1-D transform by a complex,
frequency dependent factor, filphase[]. These factors were
precomputed as part of recon_init() and combine the
tomographic filtering with a phase factor which shifts the
origin in configuration space to the projection of the
rotation axis as defined by the parameter, "center". If a
region of interest (ROI) centered on a different origin has
been specified [(X0,Y0)!=(0,0)], multiplication by an
additional phase factor, dependent on angle as well as
frequency, is required.
4. For each data element, find the Cartesian coordinates,
<U,V>, of the corresponding point in the 2D frequency plane,
in units of the spacing in the MxM rectangular grid placed
thereon; then calculate the upper and lower limits in each
coordinate direction of the integer coordinates for the
grid points contained in an LxL box centered on <U,V>.
Using a precomputed table of the (1-D) convolving function,
W, calculate the contribution of this data element to the
(2-D) convolvent (the 2_D convolvent is the product of
1_D convolvents in the X and Y directions) at each of these
grid points, and update the complex 2D array H accordingly.
At the end of Phase 1, the array H[][] contains data arranged in
"natural", rather than wrap-around order -- that is, the origin in
the spatial frequency plane is situated in the middle, rather than
at the beginning, of the array, H[][]. This simplifies the code
for carrying out the convolution (step 4 above), but necessitates
an additional correction -- See Phase 3 below.
**********************************************************************/
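/* In code terms: the contribution of one transformed sample at frequency point (U,V) to a
grid point (iu,iv) inside its LxL window is the separable weight
convolv = Cnvlvnt(|U-iu|*tblspcg) * Cnvlvnt(|V-iv|*tblspcg),
and H[iu*M+iv+1] accumulates convolv*Cdata1 while the mirrored point (M-iu,M-iv) accumulates
convolv*Cdata2. */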
complex_struct Cdata1, Cdata2, Ctmp;
float U, V,
rtmp,
L2 = L/2.0,
convolv,
tblspcg = 2*ltbl/L;
long pdim2=pdim>>1,
M2=M>>1,
iul,
iuh,
iu,
ivl,
ivh,
iv,
n;
/* Following are to handle offset ROI case */
float offset=0.0; // NOTE: I added the "=0.0" initializer myself.
complex_struct phfac;
for (n = 0; n < theta_list_size; n++) { /*** Start loop on angles */
int j, k;
if (flag)
offset = (X0 * COSE[n] + Y0*SINE[n]) * PI;
j = 1;
while (j < sinogram_x_dim + 1) {
cproj[j].r = G1[n][j-1];
cproj[j].i = G2[n][j-1];
j++;
}
while (j < pdim) { /** Zero fill the rest of array **/
cproj[j].r = cproj[j].i = 0.0;
j++;
}
four1 ((float *) cproj+1, pdim, 1);
for (j = 1; j < pdim2; j++) { /* Start loop on transform data */ // 550 ms
if (!flag) {
Ctmp.r = filphase[j].r;
Ctmp.i = filphase[j].i;
}
else {
phfac.r = cos(j*offset);
phfac.i = -sin(j*offset);
Cmult (Ctmp, filphase[j], phfac);
}
Cmult (Cdata1, Ctmp, cproj[j+1])
Ctmp.i = -Ctmp.i;
Cmult (Cdata2, Ctmp, cproj[(pdim-j)+1])
U = (rtmp=scale*j) * COSE[n] + M2; /* X direction */
V = rtmp * SINE[n] + M2; /* Y direction */
/* Note freq space origin is at (M2,M2), but we
offset the indices U, V, etc. to range from 0 to M-1 */
iul = (long int) (ceil(U-L2));
iuh = (long int) (floor(U+L2));
ivl = (long int) (ceil(V-L2));
ivh = (long int) (floor(V+L2));
if (iul<1)
iul=1;
if (iuh>=M)
iuh=M-1;
if (ivl<1)
ivl=1;
if (ivh>=M)
ivh=M-1;
/* Note aliasing value (at index=0) is forced to zero */
for (iv=ivl,k=0;iv<=ivh;iv++,k++)
work[k] = Cnvlvnt (abs (V-iv) * tblspcg);
for (iu=iul;iu<=iuh;iu++) {
rtmp=Cnvlvnt (abs(U-iu)*tblspcg);
for (iv=ivl,k=0;iv<=ivh;iv++,k++) {
convolv = rtmp*work[k];
H[iu*M+iv+1].r += convolv * Cdata1.r;
H[iu*M+iv+1].i += convolv * Cdata1.i;
H[(M-iu)*M+(M-iv)+1].r += convolv * Cdata2.r;
H[(M-iu)*M+(M-iv)+1].i += convolv * Cdata2.i;
}
}
} /*** End loop on transform data */
} /*** End loop on angles */
}
//---------------------------------------------------------------------------
void GridRec::phase2 (void)
{
/*** Phase 2 ********************************************
Carry out a 2D inverse FFT on the array H.
At the conclusion of this phase, the configuration
space data is arranged in wrap-around order with the origin
(center of reconstructed images) situated at the start of the
array. The first (resp. second) half of the array
contains the lower, Y<0 (resp, upper Y>0) part of the
image, and within each row of the array, the first
(resp. second) half contains data for the right [X>0]
(resp. left [X<0]) half of the image.
********************************************************************/
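/* GPU path (compiled when USE_GRIDREC_GPU is defined): H is packed into a dense MxM
hipfftComplex buffer, one C2C inverse transform is run through hipFFT, and the result is
copied back with the (M-1-ny, M-1-nx) index flip noted below; otherwise the CPU routine
fourn() is used. */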
#ifdef USE_GRIDREC_GPU
float time_fft = 0;
unsigned int timerGridRec = 0;
CUT_SAFE_CALL( cutCreateTimer( &timerGridRec ) );
CUT_SAFE_CALL( cutStartTimer( timerGridRec ) );
for( int ny = 0; ny < M ; ny++ ){
for( int nx = 0; nx < M ; nx++ ){
data_H[ ny * M + nx ].x = H[ ny * M + nx + 1 ].r;
data_H[ ny * M + nx ].y = H[ ny * M + nx + 1 ].i;
}
}
hipMemcpy( d_data_H, data_H, sizeof( hipfftComplex ) * M * M, hipMemcpyHostToDevice );
hipfftResult res = hipfftExecC2C( plan_ifft2, d_data_H, d_data_H, HIPFFT_BACKWARD );
if( res != HIPFFT_SUCCESS )
printf("hipfftExecC2C failed\n ");
hipMemcpy( data_H, d_data_H, sizeof( hipfftComplex ) * M * M, hipMemcpyDeviceToHost );
// Note the coordinate transform here.
for( int ny = 0; ny < M ; ny++ ){
for( int nx = 0; nx < M ; nx++ ){
H[ ny * M + nx + 1 ].r = data_H[ (M - 1 - ny) * M + M - 1 - nx].x;
H[ ny * M + nx + 1 ].i = data_H[ (M - 1 - ny) * M + M - 1 - nx].y;
}
}
CUT_SAFE_CALL(cutStopTimer(timerGridRec));
time_fft += cutGetTimerValue(timerGridRec);
CUT_SAFE_CALL(cutDeleteTimer(timerGridRec));
// printf( "Time for fft in Phase 2 is %f (ms)\n ", time_fft );
#endif
#ifndef USE_GRIDREC_GPU
unsigned long H_size[3];
H_size[1] = H_size[2] = M;
fourn ((float*) H+1, H_size, 2, -1);
#endif
}
//---------------------------------------------------------------------------
void GridRec::phase3 (void)
{
/*** Phase 3 ******************************************************
Copy the real and imaginary parts of the complex data from H[][],
into the output buffers for the two reconstructed real images,
simultaneously carrying out a final multiplicative correction.
The correction factors are taken from the array, winv[], previously
computed in pswf_su(), and consist logically of three parts, namely:
1. A positive real factor, corresponding to the reciprocal
of the inverse Fourier transform, of the convolving
function, W, and
2. Multiplication by the cell size, (1/D1)^2, in 2D frequency
space. This correctly normalizes the 2D inverse FFT carried
out in Phase 2. (Note that all quantities are expressed in
units in which the detector spacing is one.)
3. A sign change for the "odd-numbered" elements (in a
checkerboard pattern) of the array. This compensates
for the fact that the 2-D Fourier transform (Phase 2)
started with a frequency array in which the zero frequency
point appears in the middle of the array instead of at
its start.
Only the elements in the square M0xM0 subarray of H[][], centered
about the origin, are utilized. The other elements are not
part of the actual region being reconstructed and are
discarded. Because of the wrap-around ordering, the
subarray must actually be taken from the "four corners" of the
2D array, H[][] -- See Phase 2 description, above.
The final data corresponds physically to the linear X-ray absorption
coefficient expressed in units of the inverse detector spacing -- to
convert to inverse cm (say), one must divide the data by the detector
spacing in cm.
*********************************************************************/
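/* Equivalently, for each output pixel: S1[j][k] = winv[j]*winv[k]*H[iu*M+iv+1].r and
S2[j][k] = winv[j]*winv[k]*H[iu*M+iv+1].i, with (iu,iv) taken from the wrap-around
corners of H selected by the ustart/vstart logic below. */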
long iu, iv, j, k, ustart, vstart, ufin, vfin;
float corrn_u, corrn;
j = 0;
ustart = (M-M02);
ufin = M;
while (j<M0) {
for (iu = ustart; iu < ufin; j++,iu++) {
corrn_u = winv[j];
k=0;
vstart = (M-M02);
vfin=M;
while (k<M0) {
for (iv=vstart;iv<vfin;k++,iv++) {
corrn = corrn_u * winv[k];
S1[j][k] = corrn * H[iu*M+iv+1].r;
S2[j][k] = corrn * H[iu*M+iv+1].i;
}
if (k<M0)
(vstart = 0, vfin = M02 + 1);
}
}
if (j<M0)
(ustart = 0, ufin = M02 + 1);
}
}
//---------------------------------------------------------------------------
void GridRec::trig_su (int geom, int n_ang)
{
/*********** Set up tables of sines and cosines. ***********/
int j;
switch (geom)
{
case 0 : {
float theta,
degtorad = PI/180,
*angle = theta_list;
for (j=0;j<n_ang;j++)
{
theta = degtorad*angle[j];
SINE[j] = sin(theta);
COSE[j] = cos(theta);
}
break;
}
case 1 :
case 2 : {
float dtheta = geom*PI/n_ang,
dcos,
dsin;
dcos = cos (dtheta);
dsin = sin (dtheta);
SINE[0] = 0.0;
COSE[0] = 1.0;
for(j=1;j<n_ang;j++)
{
SINE[j] = dcos*SINE[j-1]+dsin*COSE[j-1];
COSE[j] = dcos*COSE[j-1]-dsin*SINE[j-1];
}
break;
}
default : {
fprintf (stderr, "Illegal value for angle geometry indicator.\n");
exit(2);
}
}
}
//---------------------------------------------------------------------------
void GridRec::filphase_su (long pd, float center, complex_struct *A)
{
/******************************************************************/
/* Set up the complex array, filphase[], each element of which */
/* consists of a real filter factor [obtained from the function, */
/* (*pf)()], multiplying a complex phase factor (derived from the */
/* parameter, center}. See Phase 1 comments in do_recon(), above.*/
/******************************************************************/
long j,
pd2=pd>>1;
float x,
rtmp1=2*PI*center/pd,
rtmp2;
float norm=PI/pd/theta_list_size; /* Normalization factor for back transform 7/7/98 */
for (j=0;j<pd2;j++)
{
x = j*rtmp1;
rtmp2 = filter.filterData ((float)j/pd) * norm;
A[j].r = rtmp2*cos(x);
A[j].i = -rtmp2*sin(x);
}
// Note: filphase[] (A[]) is of size pdim2 (pd2) + 1. But only the first pdim2 elements
// are set. The last one filphase[pdim2] is not assigned any value.
// 8/24/2011 Yongsheng Pan
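// In closed form: filphase[j] = (PI/(pd*theta_list_size)) * F(j/pd) * exp(-i*2*PI*center*j/pd)
// for j = 0..pd/2-1, where F is filter.filterData.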
}
//---------------------------------------------------------------------------
void GridRec::pswf_su (pswf_struct *pswf, long ltbl, long linv, float* wtbl, float* dwtbl, float* winv)
{
/*************************************************************/
/* Set up lookup tables for convolvent (used in Phase 1 of */
/* do_recon()), and for the final correction factor (used in */
/* Phase 3). */
/*************************************************************/
float C,
*coefs,
lmbda,
polyz,
norm,fac;
long i;
int nt;
C=pswf->C;
nt=pswf->nt;
coefs=pswf->coefs;
lmbda=pswf->lmbda;
polyz=legendre(nt,coefs,0.);
wtbl[0]=1.0;
for (i=1;i<=ltbl;i++)
{
wtbl[i]=legendre(nt,coefs,(float)i/ltbl)/polyz;
#ifdef INTERP
dwtbl[i]=wtbl[i]-wtbl[i-1];
#endif
}
fac=(float)ltbl/(linv+0.5);
norm=sqrt (PI/2/C/lmbda)/sampl; /* 7/7/98 */
/* Note the final result at end of Phase 3 contains the factor,
norm^2. This incorporates the normalization of the 2D
inverse FFT in Phase 2 as well as scale factors involved
in the inverse Fourier transform of the convolvent.
7/7/98 */
winv[linv]=norm/Cnvlvnt(0.);
for (i=1;i<=linv;i++)
{
norm=-norm;
/* Minus sign for alternate entries
corrects for "natural" data layout
in array H at end of Phase 1. */
winv[linv+i] = winv[linv-i] = norm/Cnvlvnt(i*fac);
}
}
//---------------------------------------------------------------------------
float GridRec::legendre (int n, float *coefs, float x)
{
/***************************************************
* *
* Compute SUM(coefs(k)*P(2*k,x), for k=0,n/2) *
* *
* where P(j,x) is the jth Legendre polynomial *
* *
***************************************************/
float penult, last, newer, y;
int j, k, even;
if (x>1||x<-1){
fprintf(stderr, "\nInvalid argument to legendre()");
exit(2);
}
y=coefs[0];
penult=1.;
last=x;
even=1;
k=1;
for (j=2;j<=n;j++) {
newer=(x*(2*j-1)*last-(j-1)*penult)/j;
if (even) {
y+=newer*coefs[k];
even=0;
k++;
}
else
even=1;
penult=last;
last=newer;
}
return y;
}
//---------------------------------------------------------------------------
void GridRec::get_pswf (float C, pswf_struct **P)
{
int i=0;
while (i<NO_PSWFS && abs(C-pswf_db[i].C)>0.01)
i++;
if (i>=NO_PSWFS)
{
fprintf(stderr, "Prolate parameter, C = %f not in data base\n",C);
exit(2);
}
*P = &pswf_db[i];
return;
}
void GridRec::setGPUDeviceID(int id ){
deviceID = id;
}
int GridRec::getGPUDeviceID( ){
return deviceID;
}
| 6220599c0803f73a30835b73a33faf0c0866c6ae.cu | //---------------------------------------------------------------------------
#pragma hdrstop
#include "gridrec.h"
#include <sys/time.h>
//---------------------------------------------------------------------------
#pragma package(smart_init)
#ifdef USE_GRIDREC_GPU
texture<float2, 2, cudaReadModeElementType> tex_cproj_res;
#endif
//---------------------------------------------------------------------------
GridRec::GridRec (void)
{
pswf_db[0].C = 4.0;
pswf_db[0].lmbda = 0.99588549;
pswf_db[0].nt = 16;
pswf_db[0].coefs[0] = 0.5239891E+01;
pswf_db[0].coefs[1] = -0.5308499E+01;
pswf_db[0].coefs[2] = 0.1184591E+01;
pswf_db[0].coefs[3] = -0.1230763E-00;
pswf_db[0].coefs[4] = 0.7371623E-02;
pswf_db[0].coefs[5] = -0.2864074E-03;
pswf_db[0].coefs[6] = 0.7789983E-05;
pswf_db[0].coefs[7] = -0.1564700E-06;
pswf_db[0].coefs[8] = 0.2414647E-08;
pswf_db[0].coefs[9] = 0.0;
pswf_db[0].coefs[10] = 0.0;
pswf_db[0].coefs[11] = 0.0;
pswf_db[0].coefs[12] = 0.0;
pswf_db[0].coefs[13] = 0.0;
pswf_db[0].coefs[14] = 0.0;
pswf_db[1].C = 4.2;
pswf_db[1].lmbda = 0.99657887;
pswf_db[1].nt = 16;
pswf_db[1].coefs[0] = 0.6062942E+01;
pswf_db[1].coefs[1] = -0.6450252E+01;
pswf_db[1].coefs[2] = 0.1551875E+01;
pswf_db[1].coefs[3] = -0.1755960E-01;
pswf_db[1].coefs[4] = 0.1150712E-01;
pswf_db[1].coefs[5] = -0.4903653E-03;
pswf_db[1].coefs[6] = 0.1464986E-04;
pswf_db[1].coefs[7] = -0.3235110E-06;
pswf_db[1].coefs[8] = 0.5492141E-08;
pswf_db[1].coefs[9] = 0.0;
pswf_db[1].coefs[10] = 0.0;
pswf_db[1].coefs[11] = 0.0;
pswf_db[1].coefs[12] = 0.0;
pswf_db[1].coefs[13] = 0.0;
pswf_db[1].coefs[14] = 0.0;
pswf_db[2].C = 5.0;
pswf_db[2].lmbda = 0.99935241;
pswf_db[2].nt = 18;
pswf_db[2].coefs[0] = 0.1115509E+02;
pswf_db[2].coefs[1] = -0.1384861E+02;
pswf_db[2].coefs[2] = 0.4289811E+01;
pswf_db[2].coefs[3] = -0.6514303E-00;
pswf_db[2].coefs[4] = 0.5844993E-01;
pswf_db[2].coefs[5] = -0.3447736E-02;
pswf_db[2].coefs[6] = 0.1435066E-03;
pswf_db[2].coefs[7] = -0.4433680E-05;
pswf_db[2].coefs[8] = 0.1056040E-06;
pswf_db[2].coefs[9] = -0.1997173E-08;
pswf_db[2].coefs[10] = 0.0;
pswf_db[2].coefs[11] = 0.0;
pswf_db[2].coefs[12] = 0.0;
pswf_db[2].coefs[13] = 0.0;
pswf_db[2].coefs[14] = 0.0;
pswf_db[3].C = 6.0;
pswf_db[3].lmbda = 0.9990188;
pswf_db[3].nt = 18;
pswf_db[3].coefs[0] = 0.2495593E+02;
pswf_db[3].coefs[1] = -0.3531124E+02;
pswf_db[3].coefs[2] = 0.1383722E+02;
pswf_db[3].coefs[3] = -0.2799028E+01;
pswf_db[3].coefs[4] = 0.3437217E-00;
pswf_db[3].coefs[5] = -0.2818024E-01;
pswf_db[3].coefs[6] = 0.1645842E-02;
pswf_db[3].coefs[7] = -0.7179160E-04;
pswf_db[3].coefs[8] = 0.2424510E-05;
pswf_db[3].coefs[9] = -0.6520875E-07;
pswf_db[3].coefs[10] = 0.0;
pswf_db[3].coefs[11] = 0.0;
pswf_db[3].coefs[12] = 0.0;
pswf_db[3].coefs[13] = 0.0;
pswf_db[3].coefs[14] = 0.0;
pswf_db[4].C = 7.0;
pswf_db[4].lmbda = 0.99998546;
pswf_db[4].nt = 20;
pswf_db[4].coefs[0] = 0.5767616E+02;
pswf_db[4].coefs[1] = -0.8931343E+02;
pswf_db[4].coefs[2] = 0.4167596E+02;
pswf_db[4].coefs[3] = -0.1053599E+02;
pswf_db[4].coefs[4] = 0.1662374E+01;
pswf_db[4].coefs[5] = -0.1780527E-00;
pswf_db[4].coefs[6] = 0.1372983E-01;
pswf_db[4].coefs[7] = -0.7963169E-03;
pswf_db[4].coefs[8] = 0.3593372E-04;
pswf_db[4].coefs[9] = -0.1295941E-05;
pswf_db[4].coefs[10] = 0.3817796E-07;
pswf_db[4].coefs[11] = 0.0;
pswf_db[4].coefs[12] = 0.0;
pswf_db[4].coefs[13] = 0.0;
pswf_db[4].coefs[14] = 0.0;
num_sinograms_needed = 2;
SINE = NULL;
COSE = NULL;
cproj = NULL;
filphase = NULL;
wtbl = NULL;
#ifdef INTERP
dwtbl = NULL;
#endif
winv = NULL;
work = NULL;
H = NULL;
G1 = NULL;
G2 = NULL;
S1 = NULL;
S2 = NULL;
#ifdef USE_GRIDREC_GPU
// GPU
data_H = NULL;
d_data_H = NULL;
deviceID = 0;
#endif // USE_GRIDREC_GPU
}
//---------------------------------------------------------------------------
// void GridRec::acknowledgements (LogFileClass *acknowledge_file)
// {
// acknowledge_file->Message ("__________________________________________________________________");
// acknowledge_file->Message ("GridRec class");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("Class for performing reconstructions based on the \"GridRec\" algorythm.");
// acknowledge_file->Message ("Origional source code developed in C by:");
// acknowledge_file->Message (" Still trying to find out who--there were no comments in the code ");
// acknowledge_file->Message ("Developed and Maintained by:");
// acknowledge_file->Message (" Brian Tieman & Francesco DeCarlo");
// acknowledge_file->Message (" Argonne National Laboratory");
// acknowledge_file->Message (" [email protected]");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("8/20/2003 V1.0 BT First version with acknowledgements");
// acknowledge_file->Message ("8/20/2003 V1.0 BT Ported C code to a CPP object structure");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("");
// acknowledge_file->Message ("__________________________________________________________________");
// }
//---------------------------------------------------------------------------
void GridRec::setSinoAndReconBuffers (int number, float *sinogram_address, float *reconstruction_address)
{
int loop;
if (G1 == NULL)
G1 = (float **) malloc((size_t) (theta_list_size * sizeof(float *)));
if (G2 == NULL)
G2 = (float **) malloc((size_t) (theta_list_size * sizeof(float *)));
if (S1 == NULL)
S1 = (float **) malloc((size_t) (imgsiz * sizeof(float *)));
if (S2 == NULL)
S2 = (float **) malloc((size_t) (imgsiz * sizeof(float *)));
if (number == 1)
{
sinogram1 = sinogram_address;
reconstruction1 = reconstruction_address;
for (loop=0;loop<theta_list_size;loop++)
G1[loop] = &sinogram1[loop*sinogram_x_dim];
for (loop=0;loop<imgsiz;loop++)
S1[loop] = &reconstruction1[loop*sinogram_x_dim];
}
if (number == 2)
{
sinogram2 = sinogram_address;
reconstruction2 = reconstruction_address;
for (loop=0;loop<theta_list_size;loop++)
G2[loop] = &sinogram2[loop*sinogram_x_dim];
for (loop=0;loop<imgsiz;loop++)
S2[loop] = &reconstruction2[loop*sinogram_x_dim];
}
}
//---------------------------------------------------------------------------
void GridRec::init (void)
{
float center,
C,
MaxPixSiz,
R,
D0,
D1;
long itmp;
pswf_struct *pswf;
center = sinogram_x_dim / 2;
sampl = 1.0;
MaxPixSiz = 1.0;
R = 1.0;
X0 = 0.0;
Y0 = 0.0;
ltbl = 512;
get_pswf (6.0, &pswf);
C = pswf->C;
if (X0!=0.0||Y0!=0.0)
flag = 1;
else
flag = 0;
pdim = 1;
itmp = sinogram_x_dim-1;
while (itmp)
{
pdim<<=1;
itmp>>=1;
}
D0 = R*sinogram_x_dim;
D1 = sampl*D0;
M = 1;
itmp = (long int) (D1/MaxPixSiz-1);
while (itmp)
{
M<<=1;
itmp>>=1;
}
M02 = (long int) (floor(M/2/sampl-0.5));
M0 = 2*M02+1;
sampl = (float)M/M0;
D1 = sampl*D0;
L = 2*C*sampl/PI;
scale = D1/pdim;
cproj = (complex_struct *) malloc ((pdim+1) * sizeof(complex_struct));
filphase = (complex_struct *) malloc (((pdim/2)+1) * sizeof(complex_struct));
wtbl = (float *) malloc ((ltbl+1) * sizeof(float));
#ifdef INTERP
dwtbl = (float *) malloc ((ltbl+1) * sizeof(float));
#endif
winv = (float *) malloc (M0 * sizeof(float));
work = (float *) malloc (((int) L+1) * sizeof(float));
H = (complex_struct *) malloc ((M+1)*(M+1)*sizeof(complex_struct));
SINE = (float *) malloc (theta_list_size * sizeof (float));
COSE = (float *) malloc (theta_list_size * sizeof (float));
trig_su (0, theta_list_size);
filphase_su (pdim, center, filphase);
pswf_su (pswf, ltbl, M02, wtbl, dwtbl, winv);
imgsiz = M0;
#ifdef USE_GRIDREC_GPU
// GPU
cudaSetDevice( deviceID );
// printf("Use CUDA device %d\n", deviceID);
//
data_H = new cufftComplex[ M * M ];
cudaMalloc( (void**)&d_data_H, sizeof( cufftComplex ) * M * M );
cufftResult res = cufftPlan2d( &plan_ifft2, M, M, CUFFT_C2C );
if( res != CUFFT_SUCCESS )
printf("cufftPlan2d failed\n ");
#endif // USE_GRIDREC_GPU
}
//---------------------------------------------------------------------------
void GridRec::reconstruct (void)
{
memset (H, 0, (M+1)*(M+1)*sizeof(complex_struct));
// #ifdef USE_GRIDREC_GPU
// float timePhase1;
// unsigned int timerPhase1 = 0; // test
// CUT_SAFE_CALL( cutCreateTimer( &timerPhase1 ) );
// CUT_SAFE_CALL( cutStartTimer( timerPhase1 ) );
// #endif // USE_GRIDREC_GPU
phase1 ();
// #ifdef USE_GRIDREC_GPU
// CUT_SAFE_CALL(cutStopTimer(timerPhase1)); // test
// timePhase1 = cutGetTimerValue(timerPhase1);
// CUT_SAFE_CALL(cutDeleteTimer(timerPhase1));
// printf("total time for Phase1 is %f ms \n", timePhase1);
//
// float timePhase2;
// unsigned int timerPhase2 = 0; // test
// CUT_SAFE_CALL( cutCreateTimer( &timerPhase2 ) );
// CUT_SAFE_CALL( cutStartTimer( timerPhase2 ) );
// #endif
phase2 ();
// #ifdef USE_GRIDREC_GPU
// CUT_SAFE_CALL(cutStopTimer(timerPhase2)); // test
// timePhase2 = cutGetTimerValue(timerPhase2);
// CUT_SAFE_CALL(cutDeleteTimer(timerPhase2));
// printf("total time for Phase2 is %f ms \n", timePhase2);
// //
// float timePhase3;
// unsigned int timerPhase3 = 0; // test
// CUT_SAFE_CALL( cutCreateTimer( &timerPhase3 ) );
// CUT_SAFE_CALL( cutStartTimer( timerPhase3 ) );
// #endif
phase3 ();
// #ifdef USE_GRIDREC_GPU
// CUT_SAFE_CALL(cutStopTimer(timerPhase3)); // test
// timePhase3 = cutGetTimerValue(timerPhase3);
// CUT_SAFE_CALL(cutDeleteTimer(timerPhase3));
// printf("total time for Phase3 is %f ms \n", timePhase3);
// #endif
return;
}
//---------------------------------------------------------------------------
void GridRec::destroy (void)
{
if (SINE != NULL)
free (SINE);
if (COSE != NULL)
free (COSE);
if (cproj != NULL)
free (cproj);
if (filphase != NULL)
free (filphase);
if (wtbl != NULL)
free (wtbl);
#ifdef INTERP
if (dwtbl != NULL)
free (dwtbl);
#endif
if (winv != NULL)
free (winv);
if (work != NULL)
free (work);
if (H != NULL)
free (H);
if (G1 != NULL)
free (G1);
if (G2 != NULL)
free (G2);
if (S1 != NULL)
free (S1);
if (S2 != NULL)
free (S2);
//
#ifdef USE_GRIDREC_GPU
// GPU
cufftDestroy( plan_ifft2 );
cudaFree( d_data_H );
delete [] data_H;
#endif // USE_GRIDREC_GPU
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
//Private Methods
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
void GridRec::phase1 (void)
{
/***Phase 1 ***************************************
Loop over the n_ang projection angles. For each angle, do
the following:
1. Copy the real projection data from the two slices into the
real and imaginary parts of the first n_det elements of the
complex array, cproj[]. Set the remaining pdim-n_det elements
to zero (zero-padding).
2. Carry out a (1D) Fourier transform on the complex data.
This results in transform data that is arranged in
"wrap-around" order, with non-negative spatial frequencies
occupying the first half, and negative frequencies the second
half, of the array, cproj[].
3. Multiply each element of the 1-D transform by a complex,
frequency dependent factor, filphase[]. These factors were
precomputed as part of recon_init() and combine the
tomographic filtering with a phase factor which shifts the
origin in configuration space to the projection of the
rotation axis as defined by the parameter, "center". If a
region of interest (ROI) centered on a different origin has
been specified [(X0,Y0)!=(0,0)], multiplication by an
additional phase factor, dependent on angle as well as
frequency, is required.
4. For each data element, find the Cartesian coordinates,
<U,V>, of the corresponding point in the 2D frequency plane,
in units of the spacing in the MxM rectangular grid placed
thereon; then calculate the upper and lower limits in each
coordinate direction of the integer coordinates for the
grid points contained in an LxL box centered on <U,V>.
Using a precomputed table of the (1-D) convolving function,
W, calculate the contribution of this data element to the
(2-D) convolvent (the 2_D convolvent is the product of
1_D convolvents in the X and Y directions) at each of these
grid points, and update the complex 2D array H accordingly.
At the end of Phase 1, the array H[][] contains data arranged in
"natural", rather than wrap-around order -- that is, the origin in
the spatial frequency plane is situated in the middle, rather than
at the beginning, of the array, H[][]. This simplifies the code
for carrying out the convolution (step 4 above), but necessitates
an additional correction -- See Phase 3 below.
**********************************************************************/
complex_struct Cdata1, Cdata2, Ctmp;
float U, V,
rtmp,
L2 = L/2.0,
convolv,
tblspcg = 2*ltbl/L;
long pdim2=pdim>>1,
M2=M>>1,
iul,
iuh,
iu,
ivl,
ivh,
iv,
n;
/* Following are to handle offset ROI case */
float offset=0.0; // NOTE: I added the "=0.0" initializer myself.
complex_struct phfac;
for (n = 0; n < theta_list_size; n++) { /*** Start loop on angles */
int j, k;
if (flag)
offset = (X0 * COSE[n] + Y0*SINE[n]) * PI;
j = 1;
while (j < sinogram_x_dim + 1) {
cproj[j].r = G1[n][j-1];
cproj[j].i = G2[n][j-1];
j++;
}
while (j < pdim) { /** Zero fill the rest of array **/
cproj[j].r = cproj[j].i = 0.0;
j++;
}
four1 ((float *) cproj+1, pdim, 1);
for (j = 1; j < pdim2; j++) { /* Start loop on transform data */ // 550 ms
if (!flag) {
Ctmp.r = filphase[j].r;
Ctmp.i = filphase[j].i;
}
else {
phfac.r = cos(j*offset);
phfac.i = -sin(j*offset);
Cmult (Ctmp, filphase[j], phfac);
}
Cmult (Cdata1, Ctmp, cproj[j+1])
Ctmp.i = -Ctmp.i;
Cmult (Cdata2, Ctmp, cproj[(pdim-j)+1])
U = (rtmp=scale*j) * COSE[n] + M2; /* X direction */
V = rtmp * SINE[n] + M2; /* Y direction */
/* Note freq space origin is at (M2,M2), but we
offset the indices U, V, etc. to range from 0 to M-1 */
iul = (long int) (ceil(U-L2));
iuh = (long int) (floor(U+L2));
ivl = (long int) (ceil(V-L2));
ivh = (long int) (floor(V+L2));
if (iul<1)
iul=1;
if (iuh>=M)
iuh=M-1;
if (ivl<1)
ivl=1;
if (ivh>=M)
ivh=M-1;
/* Note aliasing value (at index=0) is forced to zero */
for (iv=ivl,k=0;iv<=ivh;iv++,k++)
work[k] = Cnvlvnt (abs (V-iv) * tblspcg);
for (iu=iul;iu<=iuh;iu++) {
rtmp=Cnvlvnt (abs(U-iu)*tblspcg);
for (iv=ivl,k=0;iv<=ivh;iv++,k++) {
convolv = rtmp*work[k];
H[iu*M+iv+1].r += convolv * Cdata1.r;
H[iu*M+iv+1].i += convolv * Cdata1.i;
H[(M-iu)*M+(M-iv)+1].r += convolv * Cdata2.r;
H[(M-iu)*M+(M-iv)+1].i += convolv * Cdata2.i;
}
}
} /*** End loop on transform data */
} /*** End loop on angles */
}
//---------------------------------------------------------------------------
void GridRec::phase2 (void)
{
/*** Phase 2 ********************************************
Carry out a 2D inverse FFT on the array H.
At the conclusion of this phase, the configuration
space data is arranged in wrap-around order with the origin
(center of reconstructed images) situated at the start of the
array. The first (resp. second) half of the array
contains the lower, Y<0 (resp, upper Y>0) part of the
image, and within each row of the array, the first
(resp. second) half contains data for the right [X>0]
(resp. left [X<0]) half of the image.
********************************************************************/
#ifdef USE_GRIDREC_GPU
float time_fft = 0;
unsigned int timerGridRec = 0;
CUT_SAFE_CALL( cutCreateTimer( &timerGridRec ) );
CUT_SAFE_CALL( cutStartTimer( timerGridRec ) );
for( int ny = 0; ny < M ; ny++ ){
for( int nx = 0; nx < M ; nx++ ){
data_H[ ny * M + nx ].x = H[ ny * M + nx + 1 ].r;
data_H[ ny * M + nx ].y = H[ ny * M + nx + 1 ].i;
}
}
cudaMemcpy( d_data_H, data_H, sizeof( cufftComplex ) * M * M, cudaMemcpyHostToDevice );
cufftResult res = cufftExecC2C( plan_ifft2, d_data_H, d_data_H, CUFFT_INVERSE );
if( res != CUFFT_SUCCESS )
printf("cufftExecC2C failed\n ");
cudaMemcpy( data_H, d_data_H, sizeof( cufftComplex ) * M * M, cudaMemcpyDeviceToHost );
// Note the coordinate transform here.
for( int ny = 0; ny < M ; ny++ ){
for( int nx = 0; nx < M ; nx++ ){
H[ ny * M + nx + 1 ].r = data_H[ (M - 1 - ny) * M + M - 1 - nx].x;
H[ ny * M + nx + 1 ].i = data_H[ (M - 1 - ny) * M + M - 1 - nx].y;
}
}
CUT_SAFE_CALL(cutStopTimer(timerGridRec));
time_fft += cutGetTimerValue(timerGridRec);
CUT_SAFE_CALL(cutDeleteTimer(timerGridRec));
// printf( "Time for fft in Phase 2 is %f (ms)\n ", time_fft );
#endif
#ifndef USE_GRIDREC_GPU
unsigned long H_size[3];
H_size[1] = H_size[2] = M;
fourn ((float*) H+1, H_size, 2, -1);
#endif
}
//---------------------------------------------------------------------------
void GridRec::phase3 (void)
{
/*** Phase 3 ******************************************************
Copy the real and imaginary parts of the complex data from H[][],
into the output buffers for the two reconstructed real images,
simultaneously carrying out a final multiplicative correction.
The correction factors are taken from the array, winv[], previously
computed in pswf_su(), and consist logically of three parts, namely:
1. A positive real factor, corresponding to the reciprocal
of the inverse Fourier transform, of the convolving
function, W, and
2. Multiplication by the cell size, (1/D1)^2, in 2D frequency
space. This correctly normalizes the 2D inverse FFT carried
out in Phase 2. (Note that all quantities are expressed in
units in which the detector spacing is one.)
3. A sign change for the "odd-numbered" elements (in a
checkerboard pattern) of the array. This compensates
for the fact that the 2-D Fourier transform (Phase 2)
started with a frequency array in which the zero frequency
point appears in the middle of the array instead of at
its start.
Only the elements in the square M0xM0 subarray of H[][], centered
about the origin, are utilized. The other elements are not
part of the actual region being reconstructed and are
discarded. Because of the wrap-around ordering, the
subarray must actually be taken from the "four corners" of the
2D array, H[][] -- See Phase 2 description, above.
The final data corresponds physically to the linear X-ray absorption
coefficient expressed in units of the inverse detector spacing -- to
convert to inverse cm (say), one must divide the data by the detector
spacing in cm.
*********************************************************************/
long iu, iv, j, k, ustart, vstart, ufin, vfin;
float corrn_u, corrn;
j = 0;
ustart = (M-M02);
ufin = M;
while (j<M0) {
for (iu = ustart; iu < ufin; j++,iu++) {
corrn_u = winv[j];
k=0;
vstart = (M-M02);
vfin=M;
while (k<M0) {
for (iv=vstart;iv<vfin;k++,iv++) {
corrn = corrn_u * winv[k];
S1[j][k] = corrn * H[iu*M+iv+1].r;
S2[j][k] = corrn * H[iu*M+iv+1].i;
}
if (k<M0)
(vstart = 0, vfin = M02 + 1);
}
}
if (j<M0)
(ustart = 0, ufin = M02 + 1);
}
}
//---------------------------------------------------------------------------
void GridRec::trig_su (int geom, int n_ang)
{
/*********** Set up tables of sines and cosines. ***********/
int j;
switch (geom)
{
case 0 : {
float theta,
degtorad = PI/180,
*angle = theta_list;
for (j=0;j<n_ang;j++)
{
theta = degtorad*angle[j];
SINE[j] = sin(theta);
COSE[j] = cos(theta);
}
break;
}
case 1 :
case 2 : {
float dtheta = geom*PI/n_ang,
dcos,
dsin;
dcos = cos (dtheta);
dsin = sin (dtheta);
SINE[0] = 0.0;
COSE[0] = 1.0;
for(j=1;j<n_ang;j++)
{
SINE[j] = dcos*SINE[j-1]+dsin*COSE[j-1];
COSE[j] = dcos*COSE[j-1]-dsin*SINE[j-1];
}
break;
}
default : {
fprintf (stderr, "Illegal value for angle geometry indicator.\n");
exit(2);
}
}
}
//---------------------------------------------------------------------------
void GridRec::filphase_su (long pd, float center, complex_struct *A)
{
/******************************************************************/
/* Set up the complex array, filphase[], each element of which */
/* consists of a real filter factor [obtained from the function, */
/* (*pf)()], multiplying a complex phase factor (derived from the */
/* parameter, center}. See Phase 1 comments in do_recon(), above.*/
/******************************************************************/
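/* Added note: written out, each element computed below is
A[j] = filter(j/pd) * norm * exp(-i * 2*PI*center * j/pd),
i.e. the real filter magnitude times a linear phase factor that accounts for the
offset of the rotation center. */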
long j,
pd2=pd>>1;
float x,
rtmp1=2*PI*center/pd,
rtmp2;
float norm=PI/pd/theta_list_size; /* Normalization factor for back transform 7/7/98 */
for (j=0;j<pd2;j++)
{
x = j*rtmp1;
rtmp2 = filter.filterData ((float)j/pd) * norm;
A[j].r = rtmp2*cos(x);
A[j].i = -rtmp2*sin(x);
}
// Note: filphase[] (A[]) is of size pdim2 (pd2) + 1. But only the first pdim2 elements
// are set. The last one filphase[pdim2] is not assigned any value.
// 8/24/2011 Yongsheng Pan
}
//---------------------------------------------------------------------------
void GridRec::pswf_su (pswf_struct *pswf, long ltbl, long linv, float* wtbl, float* dwtbl, float* winv)
{
/*************************************************************/
/* Set up lookup tables for convolvent (used in Phase 1 of */
/* do_recon()), and for the final correction factor (used in */
/* Phase 3). */
/*************************************************************/
float C,
*coefs,
lmbda,
polyz,
norm,fac;
long i;
int nt;
C=pswf->C;
nt=pswf->nt;
coefs=pswf->coefs;
lmbda=pswf->lmbda;
polyz=legendre(nt,coefs,0.);
wtbl[0]=1.0;
for (i=1;i<=ltbl;i++)
{
wtbl[i]=legendre(nt,coefs,(float)i/ltbl)/polyz;
#ifdef INTERP
dwtbl[i]=wtbl[i]-wtbl[i-1];
#endif
}
fac=(float)ltbl/(linv+0.5);
norm=sqrt (PI/2/C/lmbda)/sampl; /* 7/7/98 */
/* Note the final result at end of Phase 3 contains the factor,
norm^2. This incorporates the normalization of the 2D
inverse FFT in Phase 2 as well as scale factors involved
in the inverse Fourier transform of the convolvent.
7/7/98 */
winv[linv]=norm/Cnvlvnt(0.);
for (i=1;i<=linv;i++)
{
norm=-norm;
/* Minus sign for alternate entries
corrects for "natural" data layout
in array H at end of Phase 1. */
winv[linv+i] = winv[linv-i] = norm/Cnvlvnt(i*fac);
}
}
//---------------------------------------------------------------------------
float GridRec::legendre (int n, float *coefs, float x)
{
/***************************************************
* *
* Compute SUM(coefs(k)*P(2*k,x), for k=0,n/2) *
* *
* where P(j,x) is the jth Legendre polynomial *
* *
***************************************************/
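/* Added note: the loop below evaluates the polynomials with the Bonnet recurrence
P_j(x) = ( (2j-1)*x*P_{j-1}(x) - (j-1)*P_{j-2}(x) ) / j
and accumulates only the even-order terms, coefs[k]*P_{2k}(x). */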
float penult, last, newer, y;
int j, k, even;
if (x>1||x<-1){
fprintf(stderr, "\nInvalid argument to legendre()");
exit(2);
}
y=coefs[0];
penult=1.;
last=x;
even=1;
k=1;
for (j=2;j<=n;j++) {
newer=(x*(2*j-1)*last-(j-1)*penult)/j;
if (even) {
y+=newer*coefs[k];
even=0;
k++;
}
else
even=1;
penult=last;
last=newer;
}
return y;
}
//---------------------------------------------------------------------------
void GridRec::get_pswf (float C, pswf_struct **P)
{
int i=0;
while (i<NO_PSWFS && fabs(C-pswf_db[i].C)>0.01)
i++;
if (i>=NO_PSWFS)
{
fprintf(stderr, "Prolate parameter, C = %f not in data base\n",C);
exit(2);
}
*P = &pswf_db[i];
return;
}
void GridRec::setGPUDeviceID(int id ){
deviceID = id;
}
int GridRec::getGPUDeviceID( ){
return deviceID;
}
|
b72bee14e837b41845111a625658a2939aac20f5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <cstdio>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
/*
to be compiled via nvcc ==> nvcc main.cu -o exec
*/
// CUDA Kernel function to add the elements of two arrays on the GPU
bool CUDA_ = false;
// __device__ indicates a function to be executed by the GPU
__global__
void mykernel(void) {
printf( "hello from CUDA \n ==> saying hello from GPU \n"); // printf is supported in device code; std::cout is not available in kernels
}
__global__
void
summer_kernel(int* a )
{
*a = *a +1;
}
__global__
void add_tut
(int *a, int *b, int *c)
{
*c = *a + *b;
}
// __global__ functions must return void; they are launched from the host and executed on the device (GPU)
__global__
void
add(int N, int *x, int *y, int *add_result)
{
for (int i =0; i< N; ++i)
{
add_result[i] = x[i] + y[i];
}
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
add_result[i] = x[i] + y[i];
}
}
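// Added note: in add() above every thread first writes all N elements in the serial
// loop and then rewrites its own element i; the serial loop is redundant work, so the
// usual pattern keeps only the indexed "if (i < N)" form (or a grid-stride loop).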
/*
__global__
add_smart
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
*/
int main(void)
{
int print_N_time_in_parallel = 12;
hipLaunchKernelGGL(( mykernel), dim3(print_N_time_in_parallel),dim3(1), 0, 0, ); // function invoked on GPU
// executing same operation on device:
int execute_N_time_in_parallel = 12;
int a_host;
a_host = 0;
int *a_device;
//(1) alloc memory on GPU
hipMalloc((void **)&a_device, sizeof(int));
//(2) copy value(s) into GPU variable
hipMemcpy(a_device, &a_host,sizeof(int),hipMemcpyHostToDevice);
//(3) execute command
hipLaunchKernelGGL(( summer_kernel) , dim3(execute_N_time_in_parallel),dim3(1) , 0, 0, a_device);
// (4) copy back into original value
int a_result;
hipMemcpy(&a_result,a_device, sizeof(int), hipMemcpyDeviceToHost);// returns 1 but execute it 12 times in parallel
std::cout << "EOC, single value ==> a_host = "<< a_result <<std::endl;
/*
summing 2 vector on device
*/
const int N = 10;
// host arrays
int x[N] = { 1, 2, 3, 4, 5 };
int y[N] = { 10, 20, 30, 40, 50 };
int z[N] = {0};
// device copies
int size_f = sizeof(int);
int *d_x = 0;
int *d_y = 0;
int *d_z = 0;
// vector allocation on GPU
hipMalloc((void **)&d_x, size_f*N);
hipMalloc((void **)&d_y, size_f*N);
hipMalloc((void **)&d_z, size_f*N);
// copy values, this operation maps device-values and host-values
hipMemcpy(d_x, x, size_f*N, hipMemcpyHostToDevice);
hipMemcpy(d_y, y, size_f*N, hipMemcpyHostToDevice);
//Launch add() kernel on GPU
/*
Launch a kernel on the GPU with one thread for each element.
2 is the number of thread blocks and (N + 1) / 2 is the number of threads per block
*/
hipLaunchKernelGGL(( add), dim3(2), dim3((N + 1) / 2), 0, 0, N, d_x, d_y, d_z);
/* hipDeviceSynchronize waits for the kernel to finish, and returns
any errors encountered during the launch.*/
hipDeviceSynchronize();
hipMemcpy(z, d_z, size_f*N, hipMemcpyDeviceToHost);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", z[0], z[1], z[2], z[3], z[4]);
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
a = 22;
b = 44;
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add_tut), dim3(execute_N_time_in_parallel),dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
std::cout << "tutorial==> sum result is \n c = "<< c << std::endl;
/* the following line fails because d_c is a device pointer that cannot be dereferenced on the host:
===>std::cout << "tutorial==> sum result is \n *d_c = "<< *d_c << std::endl; <=== */
// Free memory
//delete [] d_x;
//delete [] d_y;
hipDeviceReset();
hipFree(d_x);
hipFree(d_y);
hipFree(d_a);
hipFree(d_b);
return 0;
} | b72bee14e837b41845111a625658a2939aac20f5.cu | #include <iostream>
#include <math.h>
#include <cstdio>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/*
to be compiled via nvcc ==> nvcc main.cu -o exec
*/
// CUDA Kernel function to add the elements of two arrays on the GPU
bool CUDA_ = false;
// __device__ indicates a function to be executed by the GPU
__global__
void mykernel(void) {
printf( "hello from CUDA \n ==> saying hello from GPU \n"); // printf is supported in device code; std::cout is not available in kernels
}
__global__
void
summer_kernel(int* a )
{
*a = *a +1;
}
__global__
void add_tut
(int *a, int *b, int *c)
{
*c = *a + *b;
}
// __global__ functions must return void; they are launched from the host and executed on the device (GPU)
__global__
void
add(int N, int *x, int *y, int *add_result)
{
for (int i =0; i< N; ++i)
{
add_result[i] = x[i] + y[i];
}
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
add_result[i] = x[i] + y[i];
}
}
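// Added note: in add() above every thread first writes all N elements in the serial
// loop and then rewrites its own element i; the serial loop is redundant work, so the
// usual pattern keeps only the indexed "if (i < N)" form (or a grid-stride loop).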
/*
__global__
add_smart
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
*/
int main(void)
{
int print_N_time_in_parallel = 12;
mykernel<<<print_N_time_in_parallel,1>>>(); // function invoked on GPU
// executing same operation on device:
int execute_N_time_in_parallel = 12;
int a_host;
a_host = 0;
int *a_device;
//(1) alloc memory on GPU
cudaMalloc((void **)&a_device, sizeof(int));
//(2) copy value(s) into GPU variable
cudaMemcpy(a_device, &a_host,sizeof(int),cudaMemcpyHostToDevice);
//(3) execute command
summer_kernel <<< execute_N_time_in_parallel,1 >>>(a_device);
// (4) copy back into original value
int a_result;
cudaMemcpy(&a_result,a_device, sizeof(int), cudaMemcpyDeviceToHost);// returns 1 but execute it 12 times in parallel
std::cout << "EOC, single value ==> a_host = "<< a_result <<std::endl;
/*
summing 2 vector on device
*/
const int N = 10;
// host arrays
int x[N] = { 1, 2, 3, 4, 5 };
int y[N] = { 10, 20, 30, 40, 50 };
int z[N] = {0};
// device copies
int size_f = sizeof(int);
int *d_x = 0;
int *d_y = 0;
int *d_z = 0;
// vector allocation on GPU
cudaMalloc((void **)&d_x, size_f*N);
cudaMalloc((void **)&d_y, size_f*N);
cudaMalloc((void **)&d_z, size_f*N);
// copy values, this operation maps device-values and host-values
cudaMemcpy(d_x, x, size_f*N, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, size_f*N, cudaMemcpyHostToDevice);
//Launch add() kernel on GPU
/*
Launch a kernel on the GPU with one thread for each element.
2 is the number of thread blocks and (N + 1) / 2 is the number of threads per block
*/
add<<<2, (N + 1) / 2>>>(N, d_x, d_y, d_z);
/* cudaDeviceSynchronize waits for the kernel to finish, and returns
any errors encountered during the launch.*/
cudaDeviceSynchronize();
cudaMemcpy(z, d_z, size_f*N, cudaMemcpyDeviceToHost);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", z[0], z[1], z[2], z[3], z[4]);
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
a = 22;
b = 44;
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
add_tut<<<execute_N_time_in_parallel,1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
std::cout << "tutorial==> sum result is \n c = "<< c << std::endl;
/* the following line fails because d_c is a device pointer that cannot be dereferenced on the host:
===>std::cout << "tutorial==> sum result is \n *d_c = "<< *d_c << std::endl; <=== */
// Free memory
//delete [] d_x;
//delete [] d_y;
cudaDeviceReset();
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_a);
cudaFree(d_b);
return 0;
} |
ad3d5c7cfca7e122c935bed0fced42ff6926cbd3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#include "common.h"
#include <string.h>
//#include <hip/hip_runtime.h>
// Structure for creature
struct Creature {
int Energy;
int Velocity;
int TimeLeft;
int Code[100];
int codelen, codepos;
int ParentRef;
int Ref;
char Output[50];
bool Child;
};
typedef struct Creature Creature;
// Structure for World
struct World {
int Energy;
int TimeLeft;
struct Creature Lifes[5000];
// bool ChildLifes[5000];
int NumOfLifes;
int AliveCreatures;
int MaxEnergy;
char Input[50];
char Fitness[50];
};
typedef struct World World;
// Return a random number between min and max
int range_rand(int min_num, int max_num) {
if(min_num > max_num) {
fprintf(stderr, "min_num %i is greater than max_num %i!\n", min_num, max_num);
}
// Return random number in range
return min_num + (rand() % (max_num - min_num + 1));
}
bool IsAlive(Creature *Life)
{
if (Life->Energy > 0 && Life->TimeLeft > 0) return(true);
return(false);
}
Creature FindCreature(World *Iteration, int Ref)
{
for (int i = 0; i < Iteration->NumOfLifes; i++)
{
if (Iteration->Lifes[i].Ref == Ref) return(Iteration->Lifes[i]);
}
// Ref not found: fall back to the first creature so the function always returns a value
return(Iteration->Lifes[0]);
}
void PrintCode(Creature *Life)
{
for (int i = 0; i < Life->codelen; i++)
printf("%i", Life->Code[i]);
}
// Calculate All World Energy
int AllEnergy(World *Iteration)
{
int totalenergy = 0;
for (int i = 0; i < Iteration->NumOfLifes; i++)
{
if (Iteration->Lifes[i].TimeLeft > 0)
totalenergy += Iteration->Lifes[i].Energy;
}
return(totalenergy);
}
void PrintLife(Creature *Life)
{
printf("\n\rFunction:PrintLife Energy:%i Velocity:%i TimeLeft:%i codelen:%i codepos: %i parentref: %i ref: %i OUTPUT:",
Life->Energy, Life->Velocity, Life->TimeLeft, Life->codelen, Life->codepos, Life->ParentRef, Life->Ref);
for (int k = 0; k < Life->codelen; k++) {
printf("%c", Life->Output[k]);
}
printf("# \nCode:");
for (int k = 0; k < Life->codelen; k++) {
if (k == Life->codepos) printf("*");
printf("%i,", Life->Code[k]);
}
}
Creature InitLife(World *Iteration, int ParRef)
{
Creature Life;
Life.Energy = Iteration->MaxEnergy - AllEnergy(Iteration);
if (Life.Energy > 5) Life.Energy = 15;
Life.Velocity = 1;
Life.TimeLeft = 19;
Life.codelen = range_rand(5, 10);
Life.codepos = 0;
Life.Child = false;
// Life.Output = "";
for (int i = 0; i < Life.codelen; i++) Life.Code[i] = range_rand(1, 9);
// Life.Ref = range_rand(1, 65535);
Life.Ref = Iteration->NumOfLifes;
// if (ParRef == 0) printf("\n *** REF IS BROKEN");
Life.ParentRef = ParRef;
// printf("\n LIFE BORN");
// PrintLife(Life);
Iteration->Lifes[Iteration->NumOfLifes] = Life;
Iteration->NumOfLifes++;
return(Life);
}
void NewLife(World *Iteration, int ParRef, Creature *Life)
{
Life->Energy = Iteration->MaxEnergy - Iteration->Energy;
if (Life->Energy > 5) Life->Energy = 15;
Life->Velocity = 1;
Life->TimeLeft = 19;
Life->codelen = range_rand(5, 10);
Life->codepos = 0;
Life->Child = false;
// Life->Output = "";
for (int i = 0; i < Life->codelen; i++) Life->Code[i] = range_rand(1, 9);
Life->Ref = Iteration->NumOfLifes;
// if (ParRef == 0) printf("\n *** REF IS BROKEN");
Life->ParentRef = ParRef;
// printf("\n LIFE BORN");
// PrintLife(Life);
Iteration->Lifes[Iteration->NumOfLifes] = *Life;
Iteration->NumOfLifes++;
}
__global__ void RunLife(World *Iteration, const int n)
{
struct Creature NewLife; // Make a child with random permutation
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
Iteration->TimeLeft--;
// Iteration->AliveCreatures = 0;
// Iteration->Energy = 0;
// printf("\n\r------------------------\n\rFunction:PrintWorld TimeLeft:%i Energy:%i NumOfLifes:%i AliveCreatures: %i",
// Iteration->TimeLeft, Iteration->Energy, Iteration->NumOfLifes, Iteration->AliveCreatures);
if (i < n)
{
struct Creature Life = Iteration->Lifes[i];
int NewRef = Life.Ref;
// IsAlive
if (Life.Energy > 0 && Life.TimeLeft > 0)
{
// Iteration->Energy += Life.Energy;
// Iteration->AliveCreatures++;
// PrintLife
// printf("\n\rFunction:PrintLife Energy:%i Velocity:%i TimeLeft:%i codelen:%i codepos: %i parentref: %i ref: %i \nCode:",
// Life.Energy, Life.Velocity, Life.TimeLeft, Life.codelen, Life.codepos, Life.ParentRef, Life.Ref);
// for (int k = 0; k < Life.codelen; k++) printf("%i", Life.Code[k]);
// run code "Velocity" number of times
for (int i = 0; i < Life.Velocity; i++) {
int k;
switch(Life.Code[Life.codepos])
{
case 1: Life.Energy += 2;
break;
case 2: Life.Velocity++; //if (Life.codelen > 3) Life.codelen = Life.codelen/2; // Half genome
break;
case 3: Life.Output[Life.codepos] = Life.Output[Life.codepos];
//for (k = 0; k < Life.codelen-1; k++) // Learn from myself? other creature
//Life.Code[Life.codelen+k] = Life.Code[k+1];
//Life.codelen = Life.codelen+k;
break;
case 4: //Life.Child = true;
//Life.Output--;
break;
case 5: //Life.Output++;
break;
case 6: //Life.Output = Life.Output + Iteration->Input; break;
case 7: //Life.Output = Life.Output - Iteration->Input; break;
case 8: //Life.Output = Life.Output * Iteration->Input; break;
case 9: //Life.Output = Life.Output / Iteration->Input; break;
}
Life.codepos++;
if (Life.codepos > Life.codelen) Life.codepos = 0;
}
Life.TimeLeft--;
Life.Energy--;
}
// PrintLife
// printf("\n\rFunction:PrintLife Energy:%i Velocity:%i TimeLeft:%i codelen:%i codepos: %i parentref: %i ref: %i \nCode:",
// Life.Energy, Life.Velocity, Life.TimeLeft, Life.codelen, Life.codepos, Life.ParentRef, Life.Ref);
// for (int k = 0; k < Life.codelen; k++) printf("%i", Life.Code[k]);
Iteration->Lifes[i] = Life;
}
// printf("\n\r------------------------\n\rFunction:PrintWorld TimeLeft:%i Energy:%i NumOfLifes:%i AliveCreatures: %i",
// Iteration->TimeLeft, Iteration->Energy, Iteration->NumOfLifes, Iteration->AliveCreatures);
}
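/* Added note: RunLife uses one thread per creature, but every thread also executes
Iteration->TimeLeft-- on the same shared field without an atomic operation, so the
value of TimeLeft after a launch is the outcome of a data race and not well defined. */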
World InitWorld(void)
{
World Iteration;
Iteration.Energy = 0;
Iteration.TimeLeft = 200;
Iteration.NumOfLifes = 0;
Iteration.MaxEnergy = 50;
Iteration.AliveCreatures = 0;
// Iteration.Input = 0;
// Iteration.Fitness = ((((Iteration.Input + Iteration.Input) * Iteration.Input) - Iteration.Input) / Iteration.Input) + Iteration.Input;
InitLife(&Iteration, 0);
InitLife(&Iteration, 0);
return(Iteration);
}
void NewWorld(World *Iteration)
{
Iteration->Energy = 0;
Iteration->TimeLeft = 200;
Iteration->NumOfLifes = 0;
Iteration->MaxEnergy = 50;
Iteration->AliveCreatures = 0;
strcpy(Iteration->Input, "Hello World");
// Iteration->Fitness = ((((Iteration->Input + Iteration->Input + 1) * Iteration->Input) - Iteration->Input) / Iteration->Input) + Iteration->Input - 1;
// Iteration->Fitness = (Iteration->Input * Iteration->Input) * Iteration->Input + 1;
strcpy(Iteration->Fitness, "dlroW");
for (int i = 0; i < 2; i++)
{
InitLife(Iteration, 0);
}
Creature ArtLife = InitLife(Iteration, -1);
// ArtLife.Code = {5,1,8,2,6,6,1,3,3,1,6};
ArtLife.Code[0] = 8;
ArtLife.Code[1] = 4;
ArtLife.codelen = 2;
}
void PrintWorld(World *Iteration)
{
printf("\n\r------------------------\n\rFunction:PrintWorld TimeLeft:%i Energy:%i NumOfLifes:%i AliveCreatures: %i\n--------------------",
Iteration->TimeLeft, Iteration->Energy, Iteration->NumOfLifes, Iteration->AliveCreatures);
}
// Run World Iteration
void RunWorld(World *Iteration)
{
Iteration->Energy = AllEnergy(Iteration);
Iteration->TimeLeft--;
PrintWorld(Iteration);
// int i;
// scanf("%i", &i);
Iteration->AliveCreatures = 0;
hipLaunchKernelGGL(( RunLife) , dim3(1), dim3(10), 0, 0, Iteration, 1<<22);
hipDeviceSynchronize();
// for (int i = 0; i < Iteration->NumOfLifes; i++)
// {
// printf("\n Life number: %i", i);
// int CurRef = RunLife <<<1, 1>>>(Iteration, &Iteration->Lifes[i]);
// RunLife <<<1, 1>>>(Iteration, &Iteration->Lifes[i]);
// }
if (Iteration->TimeLeft > 0 && Iteration->Energy > 0) RunWorld(Iteration);
}
__global__ void helloFromGPU(void)
{
printf("Hello World from GPU thread");
}
long SumOfChars(const char c[50])
{
long total = 0;
for(int i = 0 ; i < strlen(c); i++)
{
total += c[i]-'0';
}
return(total);
}
int main(int argc, char **argv)
{
time_t t;
// Initializes the random number generator
srand((unsigned) time(&t));
printf("%ld vs %ld", SumOfChars("Hello"), SumOfChars("Hello"));
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// allocate host memory
int nElem = 1<<22;
size_t nBytes = sizeof(World);
World *h_A = (World *)malloc(nBytes);
World *hostRef = (World *)malloc(nBytes);
World *gpuRef = (World *)malloc(nBytes);
// initialize host array
NewWorld(gpuRef);
// allocate device memory
World *d_A, *d_C;
CHECK(hipMalloc((World**)&d_A, nBytes));
CHECK(hipMalloc((World**)&d_C, nBytes));
// copy data from host to device
// CHECK(hipMemcpy(d_A, gpuRef, nBytes, hipMemcpyHostToDevice));
PrintLife(&gpuRef->Lifes[0]);
PrintLife(&gpuRef->Lifes[1]);
PrintLife(&h_A->Lifes[2]);
PrintWorld(gpuRef);
long BestFit = SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[0].Output);
// Run World all iterations
// for (int i = 0; i < 200; i++)
do
{
// for (int j = 0; j < gpuRef->NumOfLifes; j++) {
// gpuRef->ChildLifes[j] = false;
// printf(">>%d", gpuRef->ChildLifes[j]);
// }
// copy data from host to device
CHECK(hipMemcpy(d_A, gpuRef, nBytes, hipMemcpyHostToDevice));
// RunLife <<<1, gpuRef->NumOfLifes>>>(d_A, 1<<22);
hipLaunchKernelGGL(( RunLife) , dim3(1), dim3(512), 0, 0, d_A, 1<<22);
CHECK(hipDeviceSynchronize());
CHECK(hipMemcpy(gpuRef, d_A, nBytes, hipMemcpyDeviceToHost));
gpuRef->AliveCreatures = 0;
gpuRef->Energy = 0;
BestFit = SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[0].Output);
int BestFitNo = 0;
for (int j = 0; j < gpuRef->NumOfLifes; j++) {
// PrintLife(&gpuRef->Lifes[j]);
// printf(">>%d", gpuRef->ChildLifes[j]);
if (gpuRef->Lifes[j].Energy > 0 && gpuRef->Lifes[j].TimeLeft > 0) {
PrintLife(&gpuRef->Lifes[j]);
gpuRef->AliveCreatures++;
gpuRef->Energy += gpuRef->Lifes[j].Energy;
// PrintLife(&gpuRef->Lifes[j]);
printf(" *** BestFit = %s - %s = %ld vs CurBestFit %ld", gpuRef->Fitness, gpuRef->Lifes[j].Output, SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[j].Output), BestFit);
if (SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[j].Output) < BestFit) {
BestFit = SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[j].Output);
BestFitNo = j;
// printf(" *** BestFit = %i - %i = %i", gpuRef->Lifes[j].Output, gpuRef->Fitness, BestFit);
if (BestFit == 0) break;
}
}
}
// if (gpuRef->Lifes[j].Child == true)
// {
// gpuRef->Lifes[j].Child = false;
// printf("\n ***LIFE IS BORN from %i", gpuRef->Lifes[BestFitNo].Ref);
// PrintLife(&gpuRef->Lifes[BestFitNo]);
gpuRef->Lifes[gpuRef->NumOfLifes].Energy = 15;
gpuRef->Lifes[gpuRef->NumOfLifes].TimeLeft = 19;
gpuRef->Lifes[gpuRef->NumOfLifes].Velocity = 1;
gpuRef->Lifes[gpuRef->NumOfLifes].codelen = gpuRef->Lifes[BestFitNo].codelen;
gpuRef->Lifes[gpuRef->NumOfLifes].codepos = 0;
for (int k = 0; k < gpuRef->Lifes[BestFitNo].codelen; k++) {
if (range_rand(1, 3) == 1) {
gpuRef->Lifes[gpuRef->NumOfLifes].Code[k] = range_rand(1, 9);
}
else {
gpuRef->Lifes[gpuRef->NumOfLifes].Code[k] = gpuRef->Lifes[BestFitNo].Code[k];
}
}
gpuRef->Lifes[gpuRef->NumOfLifes].Ref = gpuRef->NumOfLifes;
gpuRef->Lifes[gpuRef->NumOfLifes].ParentRef = gpuRef->Lifes[BestFitNo].Ref;
// printf("\n *** Parent: %i", j);
printf("\n ***LIFE IS BORN from %i", gpuRef->Lifes[BestFitNo].Ref);
PrintLife(&gpuRef->Lifes[gpuRef->NumOfLifes]);
gpuRef->NumOfLifes++;
// }
// }
PrintWorld(gpuRef);
// copy data from host to device
// CHECK(hipMemcpy(d_A, gpuRef, nBytes, hipMemcpyHostToDevice));
} while (gpuRef->Energy > 0 && gpuRef->TimeLeft > 0 && BestFit != 0);
CHECK(hipDeviceSynchronize());
CHECK(hipMemcpy(gpuRef, d_A, nBytes, hipMemcpyDeviceToHost));
PrintWorld(gpuRef);
// PrintLife(&gpuRef->Lifes[0]);
// PrintLife(&gpuRef->Lifes[1]);
// PrintLife(&gpuRef->Lifes[2]);
CHECK(hipGetLastError());
// RunWorld(&NewWorld);
printf("\n\n *** Admire the winners genomes history:");
for (int i = 0; i < gpuRef->NumOfLifes; i++)
{
Creature Parent = gpuRef->Lifes[i];
if (IsAlive(&Parent)) {
PrintLife(&Parent);
while (Parent.ParentRef > 0) {
Parent = FindCreature(gpuRef, Parent.ParentRef);
printf("->");
PrintCode(&Parent);
// PrintLife <<<1,1>>>(Parent);
}
}
}
printf("\n\n *** Admire the winners story:");
for (int i = 0; i < gpuRef->NumOfLifes; i++)
{
Creature Parent = gpuRef->Lifes[i];
if (IsAlive(&Parent)) {
// PrintLife(Parent);
printf("\n");
while (Parent.ParentRef > 0) {
printf("%i->", Parent.Ref);
Parent = FindCreature(gpuRef, Parent.ParentRef);
// PrintLife <<<1, 1>>>(Parent);
}
}
}
printf("\n");
}
| ad3d5c7cfca7e122c935bed0fced42ff6926cbd3.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#include "common.h"
#include <string.h>
//#include <cuda_runtime.h>
// Structure for creature
struct Creature {
int Energy;
int Velocity;
int TimeLeft;
int Code[100];
int codelen, codepos;
int ParentRef;
int Ref;
char Output[50];
bool Child;
};
typedef struct Creature Creature;
// Structure for World
struct World {
int Energy;
int TimeLeft;
struct Creature Lifes[5000];
// bool ChildLifes[5000];
int NumOfLifes;
int AliveCreatures;
int MaxEnergy;
char Input[50];
char Fitness[50];
};
typedef struct World World;
// Return a random number between min and max
int range_rand(int min_num, int max_num) {
if(min_num > max_num) {
fprintf(stderr, "min_num %i is greater than max_num %i!\n", min_num, max_num);
}
// Return random number in range
return min_num + (rand() % (max_num - min_num + 1));
}
bool IsAlive(Creature *Life)
{
if (Life->Energy > 0 && Life->TimeLeft > 0) return(true);
return(false);
}
Creature FindCreature(World *Iteration, int Ref)
{
for (int i = 0; i < Iteration->NumOfLifes; i++)
{
if (Iteration->Lifes[i].Ref == Ref) return(Iteration->Lifes[i]);
}
// Ref not found: fall back to the first creature so the function always returns a value
return(Iteration->Lifes[0]);
}
void PrintCode(Creature *Life)
{
for (int i = 0; i < Life->codelen; i++)
printf("%i", Life->Code[i]);
}
// Calculate All World Energy
int AllEnergy(World *Iteration)
{
int totalenergy = 0;
for (int i = 0; i < Iteration->NumOfLifes; i++)
{
if (Iteration->Lifes[i].TimeLeft > 0)
totalenergy += Iteration->Lifes[i].Energy;
}
return(totalenergy);
}
void PrintLife(Creature *Life)
{
printf("\n\rFunction:PrintLife Energy:%i Velocity:%i TimeLeft:%i codelen:%i codepos: %i parentref: %i ref: %i OUTPUT:",
Life->Energy, Life->Velocity, Life->TimeLeft, Life->codelen, Life->codepos, Life->ParentRef, Life->Ref);
for (int k = 0; k < Life->codelen; k++) {
printf("%c", Life->Output[k]);
}
printf("# \nCode:");
for (int k = 0; k < Life->codelen; k++) {
if (k == Life->codepos) printf("*");
printf("%i,", Life->Code[k]);
}
}
Creature InitLife(World *Iteration, int ParRef)
{
Creature Life;
Life.Energy = Iteration->MaxEnergy - AllEnergy(Iteration);
if (Life.Energy > 5) Life.Energy = 15;
Life.Velocity = 1;
Life.TimeLeft = 19;
Life.codelen = range_rand(5, 10);
Life.codepos = 0;
Life.Child = false;
// Life.Output = "";
for (int i = 0; i < Life.codelen; i++) Life.Code[i] = range_rand(1, 9);
// Life.Ref = range_rand(1, 65535);
Life.Ref = Iteration->NumOfLifes;
// if (ParRef == 0) printf("\n *** REF IS BROKEN");
Life.ParentRef = ParRef;
// printf("\n LIFE BORN");
// PrintLife(Life);
Iteration->Lifes[Iteration->NumOfLifes] = Life;
Iteration->NumOfLifes++;
return(Life);
}
void NewLife(World *Iteration, int ParRef, Creature *Life)
{
Life->Energy = Iteration->MaxEnergy - Iteration->Energy;
if (Life->Energy > 5) Life->Energy = 15;
Life->Velocity = 1;
Life->TimeLeft = 19;
Life->codelen = range_rand(5, 10);
Life->codepos = 0;
Life->Child = false;
// Life->Output = "";
for (int i = 0; i < Life->codelen; i++) Life->Code[i] = range_rand(1, 9);
Life->Ref = Iteration->NumOfLifes;
// if (ParRef == 0) printf("\n *** REF IS BROKEN");
Life->ParentRef = ParRef;
// printf("\n LIFE BORN");
// PrintLife(Life);
Iteration->Lifes[Iteration->NumOfLifes] = *Life;
Iteration->NumOfLifes++;
}
__global__ void RunLife(World *Iteration, const int n)
{
struct Creature NewLife; // Make a child with random permutation
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
Iteration->TimeLeft--;
// Iteration->AliveCreatures = 0;
// Iteration->Energy = 0;
// printf("\n\r------------------------\n\rFunction:PrintWorld TimeLeft:%i Energy:%i NumOfLifes:%i AliveCreatures: %i",
// Iteration->TimeLeft, Iteration->Energy, Iteration->NumOfLifes, Iteration->AliveCreatures);
if (i < n)
{
struct Creature Life = Iteration->Lifes[i];
int NewRef = Life.Ref;
// IsAlive
if (Life.Energy > 0 && Life.TimeLeft > 0)
{
// Iteration->Energy += Life.Energy;
// Iteration->AliveCreatures++;
// PrintLife
// printf("\n\rFunction:PrintLife Energy:%i Velocity:%i TimeLeft:%i codelen:%i codepos: %i parentref: %i ref: %i \nCode:",
// Life.Energy, Life.Velocity, Life.TimeLeft, Life.codelen, Life.codepos, Life.ParentRef, Life.Ref);
// for (int k = 0; k < Life.codelen; k++) printf("%i", Life.Code[k]);
// run code "Velocity" number of times
for (int i = 0; i < Life.Velocity; i++) {
int k;
switch(Life.Code[Life.codepos])
{
case 1: Life.Energy += 2;
break;
case 2: Life.Velocity++; //if (Life.codelen > 3) Life.codelen = Life.codelen/2; // Half genome
break;
case 3: Life.Output[Life.codepos] = Life.Output[Life.codepos];
//for (k = 0; k < Life.codelen-1; k++) // Learn from myself? other creature
//Life.Code[Life.codelen+k] = Life.Code[k+1];
//Life.codelen = Life.codelen+k;
break;
case 4: //Life.Child = true;
//Life.Output--;
break;
case 5: //Life.Output++;
break;
case 6: //Life.Output = Life.Output + Iteration->Input; break;
case 7: //Life.Output = Life.Output - Iteration->Input; break;
case 8: //Life.Output = Life.Output * Iteration->Input; break;
case 9: //Life.Output = Life.Output / Iteration->Input; break;
}
Life.codepos++;
if (Life.codepos > Life.codelen) Life.codepos = 0;
}
Life.TimeLeft--;
Life.Energy--;
}
// PrintLife
// printf("\n\rFunction:PrintLife Energy:%i Velocity:%i TimeLeft:%i codelen:%i codepos: %i parentref: %i ref: %i \nCode:",
// Life.Energy, Life.Velocity, Life.TimeLeft, Life.codelen, Life.codepos, Life.ParentRef, Life.Ref);
// for (int k = 0; k < Life.codelen; k++) printf("%i", Life.Code[k]);
Iteration->Lifes[i] = Life;
}
// printf("\n\r------------------------\n\rFunction:PrintWorld TimeLeft:%i Energy:%i NumOfLifes:%i AliveCreatures: %i",
// Iteration->TimeLeft, Iteration->Energy, Iteration->NumOfLifes, Iteration->AliveCreatures);
}
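/* Added note: RunLife uses one thread per creature, but every thread also executes
Iteration->TimeLeft-- on the same shared field without an atomic operation, so the
value of TimeLeft after a launch is the outcome of a data race and not well defined. */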
World InitWorld(void)
{
World Iteration;
Iteration.Energy = 0;
Iteration.TimeLeft = 200;
Iteration.NumOfLifes = 0;
Iteration.MaxEnergy = 50;
Iteration.AliveCreatures = 0;
// Iteration.Input = 0;
// Iteration.Fitness = ((((Iteration.Input + Iteration.Input) * Iteration.Input) - Iteration.Input) / Iteration.Input) + Iteration.Input;
InitLife(&Iteration, 0);
InitLife(&Iteration, 0);
return(Iteration);
}
void NewWorld(World *Iteration)
{
Iteration->Energy = 0;
Iteration->TimeLeft = 200;
Iteration->NumOfLifes = 0;
Iteration->MaxEnergy = 50;
Iteration->AliveCreatures = 0;
strcpy(Iteration->Input, "Hello World");
// Iteration->Fitness = ((((Iteration->Input + Iteration->Input + 1) * Iteration->Input) - Iteration->Input) / Iteration->Input) + Iteration->Input - 1;
// Iteration->Fitness = (Iteration->Input * Iteration->Input) * Iteration->Input + 1;
strcpy(Iteration->Fitness, "dlroW");
for (int i = 0; i < 2; i++)
{
InitLife(Iteration, 0);
}
Creature ArtLife = InitLife(Iteration, -1);
// ArtLife.Code = {5,1,8,2,6,6,1,3,3,1,6};
ArtLife.Code[0] = 8;
ArtLife.Code[1] = 4;
ArtLife.codelen = 2;
}
void PrintWorld(World *Iteration)
{
printf("\n\r------------------------\n\rFunction:PrintWorld TimeLeft:%i Energy:%i NumOfLifes:%i AliveCreatures: %i\n--------------------",
Iteration->TimeLeft, Iteration->Energy, Iteration->NumOfLifes, Iteration->AliveCreatures);
}
// Run World Iteration
void RunWorld(World *Iteration)
{
Iteration->Energy = AllEnergy(Iteration);
Iteration->TimeLeft--;
PrintWorld(Iteration);
// int i;
// scanf("%i", &i);
Iteration->AliveCreatures = 0;
RunLife <<<1, 10>>>(Iteration, 1<<22);
cudaDeviceSynchronize();
// for (int i = 0; i < Iteration->NumOfLifes; i++)
// {
// printf("\n Life number: %i", i);
// int CurRef = RunLife <<<1, 1>>>(Iteration, &Iteration->Lifes[i]);
// RunLife <<<1, 1>>>(Iteration, &Iteration->Lifes[i]);
// }
if (Iteration->TimeLeft > 0 && Iteration->Energy > 0) RunWorld(Iteration);
}
__global__ void helloFromGPU(void)
{
printf("Hello World from GPU thread");
}
long SumOfChars(const char c[50])
{
long total = 0;
for(int i = 0 ; i < strlen(c); i++)
{
total += c[i]-'0';
}
return(total);
}
int main(int argc, char **argv)
{
time_t t;
// Initializes the random number generator
srand((unsigned) time(&t));
printf("%ld vs %ld", SumOfChars("Hello"), SumOfChars("Hello"));
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// allocate host memory
int nElem = 1<<22;
size_t nBytes = sizeof(World);
World *h_A = (World *)malloc(nBytes);
World *hostRef = (World *)malloc(nBytes);
World *gpuRef = (World *)malloc(nBytes);
// initialize host array
NewWorld(gpuRef);
// allocate device memory
World *d_A, *d_C;
CHECK(cudaMalloc((World**)&d_A, nBytes));
CHECK(cudaMalloc((World**)&d_C, nBytes));
// copy data from host to device
// CHECK(cudaMemcpy(d_A, gpuRef, nBytes, cudaMemcpyHostToDevice));
PrintLife(&gpuRef->Lifes[0]);
PrintLife(&gpuRef->Lifes[1]);
PrintLife(&h_A->Lifes[2]);
PrintWorld(gpuRef);
long BestFit = SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[0].Output);
// Run World all iterations
// for (int i = 0; i < 200; i++)
do
{
// for (int j = 0; j < gpuRef->NumOfLifes; j++) {
// gpuRef->ChildLifes[j] = false;
// printf(">>%d", gpuRef->ChildLifes[j]);
// }
// copy data from host to device
CHECK(cudaMemcpy(d_A, gpuRef, nBytes, cudaMemcpyHostToDevice));
// RunLife <<<1, gpuRef->NumOfLifes>>>(d_A, 1<<22);
RunLife <<<1, 512>>>(d_A, 1<<22);
CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(gpuRef, d_A, nBytes, cudaMemcpyDeviceToHost));
gpuRef->AliveCreatures = 0;
gpuRef->Energy = 0;
BestFit = SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[0].Output);
int BestFitNo = 0;
for (int j = 0; j < gpuRef->NumOfLifes; j++) {
// PrintLife(&gpuRef->Lifes[j]);
// printf(">>%d", gpuRef->ChildLifes[j]);
if (gpuRef->Lifes[j].Energy > 0 && gpuRef->Lifes[j].TimeLeft > 0) {
PrintLife(&gpuRef->Lifes[j]);
gpuRef->AliveCreatures++;
gpuRef->Energy += gpuRef->Lifes[j].Energy;
// PrintLife(&gpuRef->Lifes[j]);
printf(" *** BestFit = %s - %s = %ld vs CurBestFit %ld", gpuRef->Fitness, gpuRef->Lifes[j].Output, SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[j].Output), BestFit);
if (SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[j].Output) < BestFit) {
BestFit = SumOfChars(gpuRef->Fitness) - SumOfChars(gpuRef->Lifes[j].Output);
BestFitNo = j;
// printf(" *** BestFit = %i - %i = %i", gpuRef->Lifes[j].Output, gpuRef->Fitness, BestFit);
if (BestFit == 0) break;
}
}
}
// if (gpuRef->Lifes[j].Child == true)
// {
// gpuRef->Lifes[j].Child = false;
// printf("\n ***LIFE IS BORN from %i", gpuRef->Lifes[BestFitNo].Ref);
// PrintLife(&gpuRef->Lifes[BestFitNo]);
gpuRef->Lifes[gpuRef->NumOfLifes].Energy = 15;
gpuRef->Lifes[gpuRef->NumOfLifes].TimeLeft = 19;
gpuRef->Lifes[gpuRef->NumOfLifes].Velocity = 1;
gpuRef->Lifes[gpuRef->NumOfLifes].codelen = gpuRef->Lifes[BestFitNo].codelen;
gpuRef->Lifes[gpuRef->NumOfLifes].codepos = 0;
for (int k = 0; k < gpuRef->Lifes[BestFitNo].codelen; k++) {
if (range_rand(1, 3) == 1) {
gpuRef->Lifes[gpuRef->NumOfLifes].Code[k] = range_rand(1, 9);
}
else {
gpuRef->Lifes[gpuRef->NumOfLifes].Code[k] = gpuRef->Lifes[BestFitNo].Code[k];
}
}
gpuRef->Lifes[gpuRef->NumOfLifes].Ref = gpuRef->NumOfLifes;
gpuRef->Lifes[gpuRef->NumOfLifes].ParentRef = gpuRef->Lifes[BestFitNo].Ref;
// printf("\n *** Parent: %i", j);
printf("\n ***LIFE IS BORN from %i", gpuRef->Lifes[BestFitNo].Ref);
PrintLife(&gpuRef->Lifes[gpuRef->NumOfLifes]);
gpuRef->NumOfLifes++;
// }
// }
PrintWorld(gpuRef);
// copy data from host to device
// CHECK(cudaMemcpy(d_A, gpuRef, nBytes, cudaMemcpyHostToDevice));
} while (gpuRef->Energy > 0 && gpuRef->TimeLeft > 0 && BestFit != 0);
CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(gpuRef, d_A, nBytes, cudaMemcpyDeviceToHost));
PrintWorld(gpuRef);
// PrintLife(&gpuRef->Lifes[0]);
// PrintLife(&gpuRef->Lifes[1]);
// PrintLife(&gpuRef->Lifes[2]);
CHECK(cudaGetLastError());
// RunWorld(&NewWorld);
printf("\n\n *** Admire the winners genomes history:");
for (int i = 0; i < gpuRef->NumOfLifes; i++)
{
Creature Parent = gpuRef->Lifes[i];
if (IsAlive(&Parent)) {
PrintLife(&Parent);
while (Parent.ParentRef > 0) {
Parent = FindCreature(gpuRef, Parent.ParentRef);
printf("->");
PrintCode(&Parent);
// PrintLife <<<1,1>>>(Parent);
}
}
}
printf("\n\n *** Admire the winners story:");
for (int i = 0; i < gpuRef->NumOfLifes; i++)
{
Creature Parent = gpuRef->Lifes[i];
if (IsAlive(&Parent)) {
// PrintLife(Parent);
printf("\n");
while (Parent.ParentRef > 0) {
printf("%i->", Parent.Ref);
Parent = FindCreature(gpuRef, Parent.ParentRef);
// PrintLife <<<1, 1>>>(Parent);
}
}
}
printf("\n");
}
|
c1be77bca2ae5c69b2590ea3323518f04769c143.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
/* Add two vectors on the GPU */
__global__ void vectorAddGPU(float *a, float *b, float *c, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N)
{
c[idx] = a[idx] + b[idx];
}
}
// Allocate generic memory with malloc() and pin it later instead of using hipHostMalloc()
bool bPinGenericMemory = false;
// Macro to aligned up to the memory size in question
#define MEMORY_ALIGNMENT 4096
#define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) )
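// Added note: ALIGN_UP rounds an address up to the next multiple of `size`
// (size must be a power of two); e.g. ALIGN_UP(0x12345, 4096) == 0x13000.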
int main(int argc, char **argv)
{
int n, nelem, deviceCount;
int idev = 0; // use default device 0
char *device = NULL;
unsigned int flags;
size_t bytes;
float *a, *b, *c; // Pinned memory allocated on the CPU
float *a_UA, *b_UA, *c_UA; // Non-4K Aligned Pinned memory on the CPU
float *d_a, *d_b, *d_c; // Device pointers for mapped memory
float errorNorm, refNorm, ref, diff;
hipDeviceProp_t deviceProp;
if (checkCmdLineFlag(argc, (const char **)argv, "help"))
{
printf("Usage: simpleZeroCopy [OPTION]\n\n");
printf("Options:\n");
printf(" --device=[device #] Specify the device to be used\n");
printf(" --use_generic_memory (optional) use generic page-aligned for system memory\n");
return EXIT_SUCCESS;
}
/* Get the device selected by the user or default to 0, and then set it. */
if (getCmdLineArgumentString(argc, (const char **)argv, "device", &device))
{
hipGetDeviceCount(&deviceCount);
idev = atoi(device);
if (idev >= deviceCount || idev < 0)
{
fprintf(stderr, "Device number %d is invalid, will use default CUDA device 0.\n", idev);
idev = 0;
}
}
// if GPU found supports SM 1.2, then continue, otherwise we exit
if (!checkCudaCapabilities(1, 2))
{
exit(EXIT_SUCCESS);
}
if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory"))
{
#if defined(__APPLE__) || defined(MACOSX)
bPinGenericMemory = false; // Generic Pinning of System Paged memory is not currently supported on Mac OSX
#else
bPinGenericMemory = true;
#endif
}
if (bPinGenericMemory)
{
printf("> Using Generic System Paged Memory (malloc)\n");
}
else
{
printf("> Using CUDA Host Allocated (hipHostMalloc)\n");
}
checkCudaErrors(hipSetDevice(idev));
/* Verify the selected device supports mapped memory and set the device
flags for mapping host memory. */
checkCudaErrors(hipGetDeviceProperties(&deviceProp, idev));
#if CUDART_VERSION >= 2020
if (!deviceProp.canMapHostMemory)
{
fprintf(stderr, "Device %d does not support mapping CPU host memory!\n", idev);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipSetDeviceFlags(hipDeviceMapHost));
#else
fprintf(stderr, "CUDART version %d.%d does not support <hipDeviceProp_t.canMapHostMemory> field\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_SUCCESS);
#endif
#if CUDART_VERSION < 4000
if (bPinGenericMemory)
{
fprintf(stderr, "CUDART version %d.%d does not support <hipHostRegister> function\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_SUCCESS);
}
#endif
/* Allocate mapped CPU memory. */
nelem = 1048576;
bytes = nelem*sizeof(float);
if (bPinGenericMemory)
{
#if CUDART_VERSION >= 4000
a_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT);
b_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT);
c_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT);
// We need to ensure memory is aligned to 4K (so we will need to pad memory accordingly)
a = (float *) ALIGN_UP(a_UA, MEMORY_ALIGNMENT);
b = (float *) ALIGN_UP(b_UA, MEMORY_ALIGNMENT);
c = (float *) ALIGN_UP(c_UA, MEMORY_ALIGNMENT);
checkCudaErrors(hipHostRegister(a, bytes, HIP_MEMHOSTALLOC_DEVICEMAP));
checkCudaErrors(hipHostRegister(b, bytes, HIP_MEMHOSTALLOC_DEVICEMAP));
checkCudaErrors(hipHostRegister(c, bytes, HIP_MEMHOSTALLOC_DEVICEMAP));
#endif
}
else
{
#if CUDART_VERSION >= 2020
flags = hipHostMallocMapped;
checkCudaErrors(hipHostMalloc((void **)&a, bytes, flags));
checkCudaErrors(hipHostMalloc((void **)&b, bytes, flags));
checkCudaErrors(hipHostMalloc((void **)&c, bytes, flags));
#endif
}
/* Initialize the vectors. */
for (n = 0; n < nelem; n++)
{
a[n] = rand() / (float)RAND_MAX;
b[n] = rand() / (float)RAND_MAX;
}
/* Get the device pointers for the pinned CPU memory mapped into the GPU
memory space. */
#if CUDART_VERSION >= 2020
checkCudaErrors(hipHostGetDevicePointer((void **)&d_a, (void *)a, 0));
checkCudaErrors(hipHostGetDevicePointer((void **)&d_b, (void *)b, 0));
checkCudaErrors(hipHostGetDevicePointer((void **)&d_c, (void *)c, 0));
#endif
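/* Added note: with mapped (zero-copy) memory the kernel below reads and writes the
pinned host buffers a, b and c directly through d_a/d_b/d_c; the accesses travel over
the bus at run time, which is why no hipMemcpy of the vectors appears in this sample. */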
/* Call the GPU kernel using the CPU pointers residing in CPU mapped memory. */
printf("> vectorAddGPU kernel will add vectors using mapped CPU memory...\n");
dim3 block(256);
dim3 grid((unsigned int)ceil(nelem/(float)block.x));
hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, nelem);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("vectorAddGPU() execution failed");
/* Compare the results */
printf("> Checking the results from vectorAddGPU() ...\n");
errorNorm = 0.f;
refNorm = 0.f;
for (n = 0; n < nelem; n++)
{
ref = a[n] + b[n];
diff = c[n] - ref;
errorNorm += diff*diff;
refNorm += ref*ref;
}
errorNorm = (float)sqrt((double)errorNorm);
refNorm = (float)sqrt((double)refNorm);
/* Memory clean up */
printf("> Releasing CPU memory...\n");
if (bPinGenericMemory)
{
#if CUDART_VERSION >= 4000
checkCudaErrors(hipHostUnregister(a));
checkCudaErrors(hipHostUnregister(b));
checkCudaErrors(hipHostUnregister(c));
free(a_UA);
free(b_UA);
free(c_UA);
#endif
}
else
{
#if CUDART_VERSION >= 2020
checkCudaErrors(hipHostFree(a));
checkCudaErrors(hipHostFree(b));
checkCudaErrors(hipHostFree(c));
#endif
}
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(errorNorm/refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE);
}
| c1be77bca2ae5c69b2590ea3323518f04769c143.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
/* Add two vectors on the GPU */
__global__ void vectorAddGPU(float *a, float *b, float *c, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N)
{
c[idx] = a[idx] + b[idx];
}
}
// Allocate generic memory with malloc() and pin it later instead of using cudaHostAlloc()
bool bPinGenericMemory = false;
// Macro to aligned up to the memory size in question
#define MEMORY_ALIGNMENT 4096
#define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) )
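// Added note: ALIGN_UP rounds an address up to the next multiple of `size`
// (size must be a power of two); e.g. ALIGN_UP(0x12345, 4096) == 0x13000.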
int main(int argc, char **argv)
{
int n, nelem, deviceCount;
int idev = 0; // use default device 0
char *device = NULL;
unsigned int flags;
size_t bytes;
float *a, *b, *c; // Pinned memory allocated on the CPU
float *a_UA, *b_UA, *c_UA; // Non-4K Aligned Pinned memory on the CPU
float *d_a, *d_b, *d_c; // Device pointers for mapped memory
float errorNorm, refNorm, ref, diff;
cudaDeviceProp deviceProp;
if (checkCmdLineFlag(argc, (const char **)argv, "help"))
{
printf("Usage: simpleZeroCopy [OPTION]\n\n");
printf("Options:\n");
printf(" --device=[device #] Specify the device to be used\n");
printf(" --use_generic_memory (optional) use generic page-aligned for system memory\n");
return EXIT_SUCCESS;
}
/* Get the device selected by the user or default to 0, and then set it. */
if (getCmdLineArgumentString(argc, (const char **)argv, "device", &device))
{
cudaGetDeviceCount(&deviceCount);
idev = atoi(device);
if (idev >= deviceCount || idev < 0)
{
fprintf(stderr, "Device number %d is invalid, will use default CUDA device 0.\n", idev);
idev = 0;
}
}
// if GPU found supports SM 1.2, then continue, otherwise we exit
if (!checkCudaCapabilities(1, 2))
{
exit(EXIT_SUCCESS);
}
if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory"))
{
#if defined(__APPLE__) || defined(MACOSX)
bPinGenericMemory = false; // Generic Pinning of System Paged memory is not currently supported on Mac OSX
#else
bPinGenericMemory = true;
#endif
}
if (bPinGenericMemory)
{
printf("> Using Generic System Paged Memory (malloc)\n");
}
else
{
printf("> Using CUDA Host Allocated (cudaHostAlloc)\n");
}
checkCudaErrors(cudaSetDevice(idev));
/* Verify the selected device supports mapped memory and set the device
flags for mapping host memory. */
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, idev));
#if CUDART_VERSION >= 2020
if (!deviceProp.canMapHostMemory)
{
fprintf(stderr, "Device %d does not support mapping CPU host memory!\n", idev);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceMapHost));
#else
fprintf(stderr, "CUDART version %d.%d does not support <cudaDeviceProp.canMapHostMemory> field\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_SUCCESS);
#endif
#if CUDART_VERSION < 4000
if (bPinGenericMemory)
{
fprintf(stderr, "CUDART version %d.%d does not support <cudaHostRegister> function\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
#endif
/* Allocate mapped CPU memory. */
nelem = 1048576;
bytes = nelem*sizeof(float);
if (bPinGenericMemory)
{
#if CUDART_VERSION >= 4000
a_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT);
b_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT);
c_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT);
// We need to ensure memory is aligned to 4K (so we will need to pad memory accordingly)
a = (float *) ALIGN_UP(a_UA, MEMORY_ALIGNMENT);
b = (float *) ALIGN_UP(b_UA, MEMORY_ALIGNMENT);
c = (float *) ALIGN_UP(c_UA, MEMORY_ALIGNMENT);
checkCudaErrors(cudaHostRegister(a, bytes, CU_MEMHOSTALLOC_DEVICEMAP));
checkCudaErrors(cudaHostRegister(b, bytes, CU_MEMHOSTALLOC_DEVICEMAP));
checkCudaErrors(cudaHostRegister(c, bytes, CU_MEMHOSTALLOC_DEVICEMAP));
#endif
}
else
{
#if CUDART_VERSION >= 2020
flags = cudaHostAllocMapped;
checkCudaErrors(cudaHostAlloc((void **)&a, bytes, flags));
checkCudaErrors(cudaHostAlloc((void **)&b, bytes, flags));
checkCudaErrors(cudaHostAlloc((void **)&c, bytes, flags));
#endif
}
/* Initialize the vectors. */
for (n = 0; n < nelem; n++)
{
a[n] = rand() / (float)RAND_MAX;
b[n] = rand() / (float)RAND_MAX;
}
/* Get the device pointers for the pinned CPU memory mapped into the GPU
memory space. */
#if CUDART_VERSION >= 2020
checkCudaErrors(cudaHostGetDevicePointer((void **)&d_a, (void *)a, 0));
checkCudaErrors(cudaHostGetDevicePointer((void **)&d_b, (void *)b, 0));
checkCudaErrors(cudaHostGetDevicePointer((void **)&d_c, (void *)c, 0));
#endif
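/* Added note: with mapped (zero-copy) memory the kernel below reads and writes the
pinned host buffers a, b and c directly through d_a/d_b/d_c; the accesses travel over
the bus at run time, which is why no cudaMemcpy of the vectors appears in this sample. */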
/* Call the GPU kernel using the CPU pointers residing in CPU mapped memory. */
printf("> vectorAddGPU kernel will add vectors using mapped CPU memory...\n");
dim3 block(256);
dim3 grid((unsigned int)ceil(nelem/(float)block.x));
vectorAddGPU<<<grid, block>>>(d_a, d_b, d_c, nelem);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("vectorAddGPU() execution failed");
/* Compare the results */
printf("> Checking the results from vectorAddGPU() ...\n");
errorNorm = 0.f;
refNorm = 0.f;
for (n = 0; n < nelem; n++)
{
ref = a[n] + b[n];
diff = c[n] - ref;
errorNorm += diff*diff;
refNorm += ref*ref;
}
errorNorm = (float)sqrt((double)errorNorm);
refNorm = (float)sqrt((double)refNorm);
/* Memory clean up */
printf("> Releasing CPU memory...\n");
if (bPinGenericMemory)
{
#if CUDART_VERSION >= 4000
checkCudaErrors(cudaHostUnregister(a));
checkCudaErrors(cudaHostUnregister(b));
checkCudaErrors(cudaHostUnregister(c));
free(a_UA);
free(b_UA);
free(c_UA);
#endif
}
else
{
#if CUDART_VERSION >= 2020
checkCudaErrors(cudaFreeHost(a));
checkCudaErrors(cudaFreeHost(b));
checkCudaErrors(cudaFreeHost(c));
#endif
}
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(errorNorm/refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
712cb7ea70d43bda17f4d070758fec731a4cf60c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <thrust/complex.h>
#include "header_hip.cuh"
#include <vector>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
__global__ void vecAdd(uchar4 *a, uchar4 *b, uchar4 *c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int aux;
if (id < n){
aux = a[id].x + b[id].x;
c[id].x = aux;
aux = a[id].y + b[id].y;
c[id].y = aux;
aux = a[id].z + b[id].z;
c[id].z = aux;
}
}
uchar4 *convert_uimg_to_uchar4(int width, int height, int channels, unsigned char *img){
uchar4 *output;
hipHostMalloc(&output,height* width* sizeof(uchar4));
int i,j;
for(i=0;i<height;i++){
for(j=0;j<width;j++){
output[i*width+j].x = img[i*width*channels+ j*channels];
output[i*width+j].y = img[i*width*channels+ j*channels + 1];
output[i*width+j].z = img[i*width*channels+ j*channels + 2];
output[i*width+j].w = 255;
}
}
return output;
}
void transfer_uchar4_uint(int width, int height, int channels, uchar4 *input, unsigned char *output){
int i,j;
for(i = 0;i < height; i++){
for(j=0;j<width;j++){
output[i*width*channels + j*channels] = input[i*width+j].x;
output[i*width*channels + j*channels+1] = input[i*width+j].y;
output[i*width*channels + j*channels+2] = input[i*width+j].z;
}
}
}
void compute_constants(thrust::complex<float> * constant, float sigma_h){
thrust::complex <float> sigma(sigma_h,0.00);
//gamma
constant[0] = alpha0*(unit+exp(-lambda0/sigma))/(unit-exp(-lambda0/sigma)) + alpha1*(unit+exp(-lambda1/sigma))/(unit-exp(-lambda1/sigma));
//a0 a1
constant[1] = alpha0/constant[0];
constant[2] = alpha1/constant[0];
//b0 b1
constant[3] = exp(-lambda0/sigma);
constant[4] = exp(-lambda1/sigma);
//r00 r01
constant[5] = (constant[3]-unit)*(constant[3]-unit)/(constant[1]*constant[3]);
constant[6] = constant[1]/(constant[3]-unit);
//r10 r11
constant[7] = (constant[4]-unit)*(constant[4]-unit)/(constant[2]*constant[4]);
constant[8] = constant[2]/(constant[4]-unit);
//theta b0 b1
constant[9] = atan(constant[3].imag()/constant[3].real());
constant[10] = atan(constant[4].imag()/constant[4].real());
//radius b0 b1
constant[11] = sqrtf(constant[3].real()*constant[3].real() + constant[3].imag()*constant[3].imag());
constant[12] = sqrtf(constant[4].real()*constant[4].real() + constant[4].imag()*constant[4].imag());
}
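/* Added note (interpretation): b0 = exp(-lambda0/sigma) and b1 = exp(-lambda1/sigma)
act as the complex poles of a two-term recursive (IIR) approximation of a Gaussian of
width sigma, with a0/a1 the matching gains; the last four entries are the same poles
rewritten in polar form (angle and radius) for the filter kernels declared in the header. */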
void image_kernel_call_horizontal(uchar4 *auximage,uchar4 *outputimage,float sigma_r,float sigma_h,thrust::complex <float> *constant, uchar4 *img,int width, int height, int channels ,int window_w, int window_h, float kappa){
hipStream_t stream1;
hipStream_t stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
int sharedpad=5;
int nv_blocks = ceil(height/(1.00*window_h));
dim3 grid(nv_blocks,1,1);
dim3 block(window_h,1,1);
float s_quotient = sigma_h/sigma_r;
s_quotient = s_quotient* s_quotient;
hipLaunchKernelGGL(( gaussian_filter_kernel_horizontal_causal), dim3(grid),dim3(block),(window_h*(window_w+sharedpad))*sizeof(uchar4),stream1, auximage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h, sharedpad, kappa);
hipLaunchKernelGGL(( gaussian_filter_kernel_horizontal_anticausal), dim3(grid),dim3(block),(window_h*(window_w+sharedpad))*sizeof(uchar4),stream2, outputimage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h,sharedpad, kappa);
hipDeviceSynchronize();
dim3 grid2(ceil(width*height/window_h),1,1);
dim3 block2(window_h,1,1);
hipLaunchKernelGGL(( vecAdd), dim3(grid2),dim3(block2), 0, 0, auximage,outputimage,outputimage,width*height);
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
}
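/* Added note: each axis is filtered as a causal plus an anticausal one-sided recursive
pass launched on separate streams; vecAdd then sums the two partial results into the
full symmetric response for that axis. */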
void image_kernel_call_vertical(uchar4 *auximage,uchar4 *outputimage,float sigma_r,float sigma_h,thrust::complex <float> *constant, uchar4 *img,int width, int height, int channels ,int window_w, int window_h, float kappa){
hipStream_t stream1;
hipStream_t stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
int sharedpad=5;
int aux = window_w;
window_w = window_h;
window_h = aux;
int nh_blocks = ceil(width/(1.00*window_w));
dim3 grid(nh_blocks,1,1);
dim3 block(window_w,1,1);
float s_quotient = sigma_h/sigma_r;
s_quotient = s_quotient* s_quotient;
hipLaunchKernelGGL(( gaussian_filter_kernel_vertical_causal), dim3(grid),dim3(block),(window_h*(window_w+0))*sizeof(uchar4), 0, outputimage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h, 0,kappa);
hipLaunchKernelGGL(( gaussian_filter_kernel_vertical_anticausal), dim3(grid),dim3(block),(window_h*(window_w+0))*sizeof(uchar4), 0, auximage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h, 0,kappa);
hipDeviceSynchronize();
dim3 grid2(ceil(width*height/window_h),1,1);
dim3 block2(window_h,1,1);
hipLaunchKernelGGL(( vecAdd), dim3(grid2),dim3(block2), 0, 0, auximage,outputimage,outputimage,width*height);
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
}
void image_filter2d(float *exectime,float sigma_r,float sigma_h,uchar4 *inputimage,int width,int height, int channels,int window_w, int window_h, float kappa){
uchar4 *device_in, *device_out, *aux, *auximage;
float sigma;
float ratio = 1.00/2.00;
int num_it = 2,i;
thrust::complex <float> constant[13];
thrust::complex <float> *device_constant;
hipMalloc(&device_constant, 13*sizeof(thrust::complex<float>));
hipMalloc(&device_in,width*height*sizeof(uchar4));
hipMalloc(&auximage,width*height*sizeof(uchar4));
hipMalloc(&device_out,width*height*sizeof(uchar4));
sigma = sigma_h*sqrtf((ratio*ratio-1)/(powf(ratio,2*(2*num_it))-1));
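/* Added note: the starting sigma is chosen so that the variances of the 2*num_it
successive passes, each shrunk by `ratio`, add up to sigma_h^2:
sum_{k=0}^{2*num_it-1} (sigma*ratio^k)^2 = sigma^2*(ratio^(4*num_it)-1)/(ratio^2-1),
and equating this to sigma_h^2 gives the expression above. */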
hipMemcpy(device_in, inputimage, width*height*sizeof(uchar4), hipMemcpyHostToDevice);
auto start = high_resolution_clock::now();
for(i=0;i<num_it ;i++){
compute_constants(constant,sigma);
hipMemcpy(device_constant, constant, 13*sizeof(thrust::complex<float>), hipMemcpyHostToDevice);
image_kernel_call_horizontal(auximage,device_out,sigma_r,sigma_h,device_constant,device_in,width,height,channels,window_w,window_h,kappa);
aux = device_out;
device_out = device_in;
device_in = aux;
sigma = sigma*ratio;
compute_constants(constant,sigma);
hipMemcpy(device_constant, constant, 13*sizeof(thrust::complex<float>), hipMemcpyHostToDevice);
image_kernel_call_vertical(auximage,device_out,sigma_r,sigma_h,device_constant,device_in,width,height,channels,window_w,window_h,kappa);
aux = device_out;
device_out = device_in;
device_in = aux;
sigma = sigma*ratio;
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
*exectime = duration.count()/1000.00;
hipMemcpy(inputimage, aux, width*height*sizeof(uchar4), hipMemcpyDeviceToHost);
hipFree(device_constant);
hipFree(device_in);
hipFree(device_out);
hipFree(auximage);
hipDeviceSynchronize();
} | 712cb7ea70d43bda17f4d070758fec731a4cf60c.cu | #include <iostream>
#include <thrust/complex.h>
#include "header.cuh"
#include <vector>
#include <time.h>
#include <cuda_runtime.h>
#include <math.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
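// Element-wise, per-channel sum of two uchar4 images; used to combine the causal and
// anti-causal filter outputs.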
__global__ void vecAdd(uchar4 *a, uchar4 *b, uchar4 *c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int aux;
if (id < n){
aux = a[id].x + b[id].x;
c[id].x = aux;
aux = a[id].y + b[id].y;
c[id].y = aux;
aux = a[id].z + b[id].z;
c[id].z = aux;
}
}
uchar4 *convert_uimg_to_uchar4(int width, int height, int channels, unsigned char *img){
uchar4 *output;
cudaMallocHost(&output,height* width* sizeof(uchar4));
int i,j;
for(i=0;i<height;i++){
for(j=0;j<width;j++){
output[i*width+j].x = img[i*width*channels+ j*channels];
output[i*width+j].y = img[i*width*channels+ j*channels + 1];
output[i*width+j].z = img[i*width*channels+ j*channels + 2];
output[i*width+j].w = 255;
}
}
return output;
}
void transfer_uchar4_uint(int width, int height, int channels, uchar4 *input, unsigned char *output){
int i,j;
for(i = 0;i < height; i++){
for(j=0;j<width;j++){
output[i*width*channels + j*channels] = input[i*width+j].x;
output[i*width*channels + j*channels+1] = input[i*width+j].y;
output[i*width*channels + j*channels+2] = input[i*width+j].z;
}
}
}
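// Computes the coefficients of the recursive (IIR) Gaussian approximation for the given
// sigma; alpha0/alpha1, lambda0/lambda1 and `unit` are presumably defined in header.cuh.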
void compute_constants(thrust::complex<float> * constant, float sigma_h){
thrust::complex <float> sigma(sigma_h,0.00);
//gamma
constant[0] = alpha0*(unit+exp(-lambda0/sigma))/(unit-exp(-lambda0/sigma)) + alpha1*(unit+exp(-lambda1/sigma))/(unit-exp(-lambda1/sigma));
//a0 a1
constant[1] = alpha0/constant[0];
constant[2] = alpha1/constant[0];
//b0 b1
constant[3] = exp(-lambda0/sigma);
constant[4] = exp(-lambda1/sigma);
//r00 r01
constant[5] = (constant[3]-unit)*(constant[3]-unit)/(constant[1]*constant[3]);
constant[6] = constant[1]/(constant[3]-unit);
//r10 r11
constant[7] = (constant[4]-unit)*(constant[4]-unit)/(constant[2]*constant[4]);
constant[8] = constant[2]/(constant[4]-unit);
//theta b0 b1
constant[9] = atan(constant[3].imag()/constant[3].real());
constant[10] = atan(constant[4].imag()/constant[4].real());
//radius b0 b1
constant[11] = sqrtf(constant[3].real()*constant[3].real() + constant[3].imag()*constant[3].imag());
constant[12] = sqrtf(constant[4].real()*constant[4].real() + constant[4].imag()*constant[4].imag());
}
void image_kernel_call_horizontal(uchar4 *auximage,uchar4 *outputimage,float sigma_r,float sigma_h,thrust::complex <float> *constant, uchar4 *img,int width, int height, int channels ,int window_w, int window_h, float kappa){
cudaStream_t stream1;
cudaStream_t stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
int sharedpad=5;
int nv_blocks = ceil(height/(1.00*window_h));
dim3 grid(nv_blocks,1,1);
dim3 block(window_h,1,1);
float s_quotient = sigma_h/sigma_r;
s_quotient = s_quotient* s_quotient;
gaussian_filter_kernel_horizontal_causal<<<grid,block,(window_h*(window_w+sharedpad))*sizeof(uchar4),stream1>>>(auximage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h, sharedpad, kappa);
gaussian_filter_kernel_horizontal_anticausal<<<grid,block,(window_h*(window_w+sharedpad))*sizeof(uchar4),stream2>>>(outputimage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h,sharedpad, kappa);
cudaDeviceSynchronize();
dim3 grid2(ceil(width*height/window_h),1,1);
dim3 block2(window_h,1,1);
vecAdd<<<grid2,block2>>>(auximage,outputimage,outputimage,width*height);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
}
void image_kernel_call_vertical(uchar4 *auximage,uchar4 *outputimage,float sigma_r,float sigma_h,thrust::complex <float> *constant, uchar4 *img,int width, int height, int channels ,int window_w, int window_h, float kappa){
cudaStream_t stream1;
cudaStream_t stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
int sharedpad=5;
int aux = window_w;
window_w = window_h;
window_h = aux;
int nh_blocks = ceil(width/(1.00*window_w));
dim3 grid(nh_blocks,1,1);
dim3 block(window_w,1,1);
float s_quotient = sigma_h/sigma_r;
s_quotient = s_quotient* s_quotient;
gaussian_filter_kernel_vertical_causal<<<grid,block,(window_h*(window_w+0))*sizeof(uchar4)>>>(outputimage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h, 0,kappa);
gaussian_filter_kernel_vertical_anticausal<<<grid,block,(window_h*(window_w+0))*sizeof(uchar4)>>>(auximage,sigma_h,s_quotient,constant,img,width, height, channels,window_w,window_h, 0,kappa);
cudaDeviceSynchronize();
dim3 grid2(ceil(width*height/window_h),1,1);
dim3 block2(window_h,1,1);
vecAdd<<<grid2,block2>>>(auximage,outputimage,outputimage,width*height);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
}
void image_filter2d(float *exectime,float sigma_r,float sigma_h,uchar4 *inputimage,int width,int height, int channels,int window_w, int window_h, float kappa){
uchar4 *device_in, *device_out, *aux, *auximage;
float sigma;
float ratio = 1.00/2.00;
int num_it = 2,i;
thrust::complex <float> constant[13];
thrust::complex <float> *device_constant;
cudaMalloc(&device_constant, 13*sizeof(thrust::complex<float>));
cudaMalloc(&device_in,width*height*sizeof(uchar4));
cudaMalloc(&auximage,width*height*sizeof(uchar4));
cudaMalloc(&device_out,width*height*sizeof(uchar4));
sigma = sigma_h*sqrtf((ratio*ratio-1)/(powf(ratio,2*(2*num_it))-1));
cudaMemcpy(device_in, inputimage, width*height*sizeof(uchar4), cudaMemcpyHostToDevice);
auto start = high_resolution_clock::now();
for(i=0;i<num_it ;i++){
compute_constants(constant,sigma);
cudaMemcpy(device_constant, constant, 13*sizeof(thrust::complex<float>), cudaMemcpyHostToDevice);
image_kernel_call_horizontal(auximage,device_out,sigma_r,sigma_h,device_constant,device_in,width,height,channels,window_w,window_h,kappa);
aux = device_out;
device_out = device_in;
device_in = aux;
sigma = sigma*ratio;
compute_constants(constant,sigma);
cudaMemcpy(device_constant, constant, 13*sizeof(thrust::complex<float>), cudaMemcpyHostToDevice);
image_kernel_call_vertical(auximage,device_out,sigma_r,sigma_h,device_constant,device_in,width,height,channels,window_w,window_h,kappa);
aux = device_out;
device_out = device_in;
device_in = aux;
sigma = sigma*ratio;
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
*exectime = duration.count()/1000.00;
cudaMemcpy(inputimage, aux, width*height*sizeof(uchar4), cudaMemcpyDeviceToHost);
cudaFree(device_constant);
cudaFree(device_in);
cudaFree(device_out);
cudaFree(auximage);
cudaDeviceSynchronize();
} |
65b5852d2f836f4cb0b646a40223c07a9a01107c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <numeric>
#include <vector>
#include <iostream>
#include <iomanip>
void printArrayAsCharMatrix(const float *in, const size_t &width, const size_t &height)
{
std::cout << std::endl;
char buffer[4];
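// NOTE: `buffer` is only populated if the commented-out snprintf below is re-enabled;
// as written, the bytes printed further down are uninitialized.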
// int ret;
for (size_t j = 0; j < height; ++j)
{
for (size_t i = 0; i < width; ++i)
{
// ret = snprintf(buffer, sizeof buffer, "%f", in[width * j + i]);
// if (ret < 0) {
// return EXIT_FAILURE;
// }
// if (ret >= sizeof buffer) {
// }
std::cout << buffer[0] << ","
<< buffer[1] << ","
<< buffer[2] << ","
<< buffer[3]
<< ' ';
}
std::cout << std::endl;
}
}
void printArrayAsMatrix(const float *in, const size_t &width, const size_t &height)
{
std::cout << std::endl;
for (size_t j = 0; j < height; ++j)
{
for (size_t i = 0; i < width; ++i)
{
std::cout << std::fixed
<< std::setw(5) // space between numbers
<< std::setprecision(2) // numbers after decimal point
<< in[width * j + i] << ' ';
}
std::cout << std::endl;
}
}
__global__ void copyKernel(hipSurfaceObject_t inputSurfObj,
hipSurfaceObject_t outputSurfObj,
int width, int height)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int idx = y * width + x;
if (x < width && y < height)
{
float data;
// Read from input surface
surf2Dread(&data, inputSurfObj, x * 4, y);
if (idx == 100)
printf("%f\n", data);
// Write to output surface
data += 2;
surf2Dwrite(data, outputSurfObj, x * 4, y);
}
}
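// Surface reads/writes take the x coordinate in bytes, hence the x * sizeof(float) offsets below.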
__global__ void printKernel(hipSurfaceObject_t inputSurfObj,
int width, int height, int depth)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int idx = z * width * height + y * width + x;
if (x < width && y < height && z < depth)
{
float data;
// Read from input surface
surf3Dread(&data, inputSurfObj, x * sizeof(float), y, z);
printf("(%d,%d,%d):%d = %f\n", x, y, z, idx, data);
// Write to output surface
}
}
__global__ void add70(hipSurfaceObject_t inputSurfObj,
int width, int height, int depth)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int idx = z * width * height + y * width + x;
if (x < width && y < height && z < depth)
{
float data;
// Read from input surface
surf3Dread(&data, inputSurfObj, x * sizeof(float), y, z);
printf("(%d,%d,%d):%d = %f\n", x, y, z, idx, data);
// Write to output surface
surf3Dwrite(data + 70, inputSurfObj, x * sizeof(float), y, z);
}
}
int main()
{
// Inputs
size_t width = 10;
size_t height = 10;
size_t depth = 10;
size_t size = width * height * depth * sizeof(float);
// Initialize host array
float *h_data = (float *)malloc(size);
for (int z = 0; z < depth; ++z)
for (int y = 0; y < width; ++y)
for (int x = 0; x < height; ++x)
h_data[width * height * z + height * y + x] = (float)x * 1 + (float)y * 1000 + (float)z * 1000000;
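// Each element encodes its coordinates as x + 1000*y + 1000000*z so results can be
// checked visually after the kernels run.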
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc(32, 0, 0, 0,
hipChannelFormatKindFloat);
hipExtent extent = make_hipExtent(width, height, depth);
hipArray *cuInputArray; //may have to use hipArray3DCreate, the descriptor is automatically generated
hipMalloc3DArray(&cuInputArray, &channelDesc, extent,
hipArraySurfaceLoadStore);
hipArray *cuOutputArray;
hipMalloc3DArray(&cuOutputArray, &channelDesc, extent,
hipArraySurfaceLoadStore);
// checkCudaErrors(hipMemcpyToArray(cu_array, 0, 0, h_data, size,
// hipMemcpyHostToDevice));
// Copy to device memory some data located at address h_data
// in host memory
hipMemcpy3DParms memcpyparmsHtoD = {0};
// memcpyparmsHtoD.srcPtr = h_data;
memcpyparmsHtoD.srcPtr = make_hipPitchedPtr(h_data, width * sizeof(float), height, depth);
memcpyparmsHtoD.dstArray = cuInputArray;
memcpyparmsHtoD.extent = extent;
// memcpyparmsHtoD.extent = make_hipExtent(width * sizeof(float), height, depth);
memcpyparmsHtoD.kind = hipMemcpyHostToDevice;
hipMemcpy3D(&memcpyparmsHtoD);
// hipMemcpyToArray(cuInputArray, 0, 0, h_data, size,
// hipMemcpyHostToDevice);
// Create the surface objects
// Specify surface
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuInputArray;
hipSurfaceObject_t inputSurfObj = 0;
hipCreateSurfaceObject(&inputSurfObj, &resDesc);
// Invoke kernel
dim3 dimBlock(4, 4, 4);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
(height + dimBlock.y - 1) / dimBlock.y,
(depth + dimBlock.z - 1) / dimBlock.z);
// Copy from original surface and add 70
std::cout << "Printing" << std::endl;
hipLaunchKernelGGL(( printKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, inputSurfObj, width, height, depth);
std::cout << "Adding..." << std::endl;
hipLaunchKernelGGL(( add70), dim3(dimGrid), dim3(dimBlock), 0, 0, inputSurfObj, width, height, depth);
std::cout << "Printing" << std::endl;
hipLaunchKernelGGL(( printKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, inputSurfObj, width, height, depth);
// Allocate output buffer in device memory
float *d_output;
checkCudaErrors(hipMalloc(&d_output, size));
// Copy device to device
hipMemcpy3DParms memcpyparmsDtoD = {0};
// memcpyparmsHtoD.srcPtr = h_data;
memcpyparmsDtoD.dstPtr = make_hipPitchedPtr(d_output, width * sizeof(float), height, depth);
memcpyparmsDtoD.srcArray = cuInputArray;
memcpyparmsDtoD.extent = extent;
// memcpyparmsHtoD.extent = make_hipExtent(width * sizeof(float), height, depth);
memcpyparmsDtoD.kind = hipMemcpyDeviceToDevice;
hipMemcpy3D(&memcpyparmsDtoD);
checkCudaErrors(hipMemcpy(h_data, d_output, size, hipMemcpyDeviceToHost));
// Print new host data
for (int z = 0; z < depth; ++z)
{
std::cout << std::endl;
for (int y = 0; y < width; ++y)
{
std::cout << std::endl;
for (int x = 0; x < height; ++x)
{
std::cout << std::fixed << std::setw(10) << std::setprecision(1)
<< h_data[width * height * z + height * y + x] << ", ";
}
}
}
std::cout << std::endl;
// Destroy surface objects
hipDestroySurfaceObject(inputSurfObj);
// Free device memory
checkCudaErrors(hipFreeArray(cuInputArray));
checkCudaErrors(hipFreeArray(cuOutputArray));
checkCudaErrors(hipFree(d_output));
// Free other
free(h_data);
return 0;
}
| 65b5852d2f836f4cb0b646a40223c07a9a01107c.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <numeric>
#include <vector>
#include <iostream>
#include <iomanip>
void printArrayAsCharMatrix(const float *in, const size_t &width, const size_t &height)
{
std::cout << std::endl;
char buffer[4];
// int ret;
for (size_t j = 0; j < height; ++j)
{
for (size_t i = 0; i < width; ++i)
{
// ret = snprintf(buffer, sizeof buffer, "%f", in[width * j + i]);
// if (ret < 0) {
// return EXIT_FAILURE;
// }
// if (ret >= sizeof buffer) {
// }
std::cout << buffer[0] << ","
<< buffer[1] << ","
<< buffer[2] << ","
<< buffer[3]
<< ' ';
}
std::cout << std::endl;
}
}
void printArrayAsMatrix(const float *in, const size_t &width, const size_t &height)
{
std::cout << std::endl;
for (size_t j = 0; j < height; ++j)
{
for (size_t i = 0; i < width; ++i)
{
std::cout << std::fixed
<< std::setw(5) // space between numbers
<< std::setprecision(2) // nubmers after decimal point
<< in[width * j + i] << ' ';
}
std::cout << std::endl;
}
}
__global__ void copyKernel(cudaSurfaceObject_t inputSurfObj,
cudaSurfaceObject_t outputSurfObj,
int width, int height)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int idx = y * width + x;
if (x < width && y < height)
{
float data;
// Read from input surface
surf2Dread(&data, inputSurfObj, x * 4, y);
if (idx == 100)
printf("%f\n", data);
// Write to output surface
data += 2;
surf2Dwrite(data, outputSurfObj, x * 4, y);
}
}
__global__ void printKernel(cudaSurfaceObject_t inputSurfObj,
int width, int height, int depth)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int idx = z * width * height + y * width + x;
if (x < width && y < height && z < depth)
{
float data;
// Read from input surface
surf3Dread(&data, inputSurfObj, x * sizeof(float), y, z);
printf("(%d,%d,%d):%d = %f\n", x, y, z, idx, data);
// Write to output surface
}
}
__global__ void add70(cudaSurfaceObject_t inputSurfObj,
int width, int height, int depth)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int idx = z * width * height + y * width + x;
if (x < width && y < height && z < depth)
{
float data;
// Read from input surface
surf3Dread(&data, inputSurfObj, x * sizeof(float), y, z);
printf("(%d,%d,%d):%d = %f\n", x, y, z, idx, data);
// Write to output surface
surf3Dwrite(data + 70, inputSurfObj, x * sizeof(float), y, z);
}
}
int main()
{
// Inputs
size_t width = 10;
size_t height = 10;
size_t depth = 10;
size_t size = width * height * depth * sizeof(float);
// Initialize host array
float *h_data = (float *)malloc(size);
for (int z = 0; z < depth; ++z)
for (int y = 0; y < width; ++y)
for (int x = 0; x < height; ++x)
h_data[width * height * z + height * y + x] = (float)x * 1 + (float)y * 1000 + (float)z * 1000000;
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(32, 0, 0, 0,
cudaChannelFormatKindFloat);
cudaExtent extent = make_cudaExtent(width, height, depth);
cudaArray *cuInputArray; //may have to use cuArray3DCreate, the descriptor is automatically generated
cudaMalloc3DArray(&cuInputArray, &channelDesc, extent,
cudaArraySurfaceLoadStore);
cudaArray *cuOutputArray;
cudaMalloc3DArray(&cuOutputArray, &channelDesc, extent,
cudaArraySurfaceLoadStore);
// checkCudaErrors(cudaMemcpyToArray(cu_array, 0, 0, h_data, size,
// cudaMemcpyHostToDevice));
// Copy to device memory some data located at address h_data
// in host memory
cudaMemcpy3DParms memcpyparmsHtoD = {0};
// memcpyparmsHtoD.srcPtr = h_data;
memcpyparmsHtoD.srcPtr = make_cudaPitchedPtr(h_data, width * sizeof(float), height, depth);
memcpyparmsHtoD.dstArray = cuInputArray;
memcpyparmsHtoD.extent = extent;
// memcpyparmsHtoD.extent = make_cudaExtent(width * sizeof(float), height, depth);
memcpyparmsHtoD.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&memcpyparmsHtoD);
// cudaMemcpyToArray(cuInputArray, 0, 0, h_data, size,
// cudaMemcpyHostToDevice);
// Create the surface objects
// Specify surface
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuInputArray;
cudaSurfaceObject_t inputSurfObj = 0;
cudaCreateSurfaceObject(&inputSurfObj, &resDesc);
// Invoke kernel
dim3 dimBlock(4, 4, 4);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
(height + dimBlock.y - 1) / dimBlock.y,
(depth + dimBlock.z - 1) / dimBlock.z);
// Copy from original surface and add 70
std::cout << "Printing" << std::endl;
printKernel<<<dimGrid, dimBlock>>>(inputSurfObj, width, height, depth);
std::cout << "Adding..." << std::endl;
add70<<<dimGrid, dimBlock>>>(inputSurfObj, width, height, depth);
std::cout << "Printing" << std::endl;
printKernel<<<dimGrid, dimBlock>>>(inputSurfObj, width, height, depth);
// Allocate output buffer in device memory
float *d_output;
checkCudaErrors(cudaMalloc(&d_output, size));
// Copy device to device
cudaMemcpy3DParms memcpyparmsDtoD = {0};
// memcpyparmsHtoD.srcPtr = h_data;
memcpyparmsDtoD.dstPtr = make_cudaPitchedPtr(d_output, width * sizeof(float), height, depth);
memcpyparmsDtoD.srcArray = cuInputArray;
memcpyparmsDtoD.extent = extent;
// memcpyparmsHtoD.extent = make_cudaExtent(width * sizeof(float), height, depth);
memcpyparmsDtoD.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3D(&memcpyparmsDtoD);
checkCudaErrors(cudaMemcpy(h_data, d_output, size, cudaMemcpyDeviceToHost));
// Print new host data
for (int z = 0; z < depth; ++z)
{
std::cout << std::endl;
for (int y = 0; y < width; ++y)
{
std::cout << std::endl;
for (int x = 0; x < height; ++x)
{
std::cout << std::fixed << std::setw(10) << std::setprecision(1)
<< h_data[width * height * z + height * y + x] << ", ";
}
}
}
std::cout << std::endl;
// Destroy surface objects
cudaDestroySurfaceObject(inputSurfObj);
// Free device memory
checkCudaErrors(cudaFreeArray(cuInputArray));
checkCudaErrors(cudaFreeArray(cuOutputArray));
checkCudaErrors(cudaFree(d_output));
// Free other
free(h_data);
return 0;
}
|
c5bc7c3e01f66ecb30f82361dcfd50d836aa4a63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 07.03.2019
//
#include <ops/declarable/helpers/gather.h>
#include <numeric>
#include <PointersManager.h>
#include <ShapeUtils.h>
namespace nd4j {
namespace ops {
namespace helpers {
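// Flat gather: a grid-stride loop in which output element j copies x[y[j]], resolving
// offsets through each tensor's shape info.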
template<typename X, typename Y, typename Z>
__global__ static void gatherCudaLinearKernel(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo) {
__shared__ const X* x;
__shared__ const Y* y;
__shared__ Z* z;
__shared__ Nd4jLong xLen, yLen, zLen;
if (threadIdx.x == 0) {
x = reinterpret_cast<const X*>(vx);
z = reinterpret_cast<Z*>(vz);
y = reinterpret_cast<const Y *>(vy);
xLen = shape::length(xShapeInfo);
yLen = shape::length(yShapeInfo);
zLen = shape::length(zShapeInfo);
}
__syncthreads();
//const Nd4jLong zLen = shape::length(zShapeInfo);
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int j = start; j < zLen; j += step) {
auto zIndex = shape::getIndexOffset(j, zShapeInfo, zLen);
auto yIndex = shape::getIndexOffset(j, yShapeInfo, yLen);
auto xIndex = shape::getIndexOffset(y[yIndex], xShapeInfo, xLen);
//printf("%lld , %lld\n", zIndex, xIndex);
z[zIndex] = x[xIndex];
}
}
//////////////////////////////////////////////////////////////////////
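// Tiled gather: one block per output sub-array; y selects (via xOffsets) which input
// sub-array each block copies element-wise into its zOffsets slot.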
template<typename X, typename Y, typename Z>
__global__ static void gatherCuda(const int numOfSubArrs,
const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) {
const Y* y = reinterpret_cast<const Y*>(vy);
__shared__ const X* x;
__shared__ Z* z;
const Nd4jLong len = shape::length(xShapeInfo);
//const Nd4jLong zLen = shape::length(zShapeInfo);
for (int i = blockIdx.x; i < numOfSubArrs; i += gridDim.x) {
if (threadIdx.x == 0) {
x = reinterpret_cast<const X*>(vx) + xOffsets[y[shape::getIndexOffset(i, yShapeInfo, numOfSubArrs)]];
z = reinterpret_cast<Z*>(vz) + zOffsets[i];
}
__syncthreads();
for (int j = threadIdx.x; j < len; j += blockDim.x) {
auto zIndex = shape::getIndexOffset(j, zShapeInfo, len);
auto xIndex = shape::getIndexOffset(j, xShapeInfo, len);
//printf("%lld , %lld\n", zIndex, xIndex);
z[zIndex] = x[xIndex];
}
__syncthreads();
}
}
template<typename X, typename Y, typename Z>
__host__ static void gatherCudaLinear(const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo) {
hipLaunchKernelGGL(( gatherCudaLinearKernel<X,Y,Z>), dim3(128), dim3(256), 1024, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
//////////////////////////////////////////////////////////////////////
template<typename X, typename Y, typename Z>
__host__ static void gatherCudaLauncher(const hipStream_t *stream, const int numOfSubArrs,
const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) {
hipLaunchKernelGGL(( gatherCuda<X,Y,Z>), dim3(numOfSubArrs), dim3(MAX_NUM_THREADS), 1024, *stream, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, vz, zShapeInfo, zOffsets);
}
//////////////////////////////////////////////////////////////////////
void gather(nd4j::LaunchContext * context, const NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) {
const int inputRank = input->rankOf();
int axis = intArgs.size() > 0 ? intArgs[0] : 0;
if(axis < 0)
axis += inputRank;
const int numOfIntArgs = intArgs.size();
if (indices == nullptr && numOfIntArgs == 2) { // scalar case
output->assign((*input)(intArgs[1], {axis}));
}
else if (indices != nullptr && indices->isScalar()) {
if(input->rankOf() <= 1) { //For scalar indices, rank 0 or 1 input: can't do tensor along dimension 0 as this is whole array... instead, we want to get a scalar
auto idx = indices->e<Nd4jLong>(0);
auto scalarNDArray = input->e(idx);
output->assign(scalarNDArray);
}
else {
NDArray inSubArr = (*input)(indices->e<Nd4jLong>(0), {axis});
output->assign(inSubArr);
}
}
else {
NDArray* pIndices = const_cast<NDArray*>(indices);
if(indices == nullptr)
pIndices = new NDArray(input->ordering(), {numOfIntArgs-1}, std::vector<double>(intArgs.begin() + 1, intArgs.end()), DataType::INT64, input->getContext());
std::vector<int> dimsOut(pIndices->rankOf());
std::iota(dimsOut.begin(), dimsOut.end(), axis); // fill with axis, axis+1, ... axis+pIndices->rankOf()-1
const Nd4jLong numOfSubArrs = pIndices->lengthOf();
Nd4jLong *outSubArrShapeInfo(nullptr), *inSubArrShapeInfo(nullptr), *outSubArrOffsets(nullptr), *inSubArrOffsets(nullptr);
input->getSubArrShapeAndOffsets({axis}, inSubArrShapeInfo, inSubArrOffsets);
output->getSubArrShapeAndOffsets(dimsOut, outSubArrShapeInfo, outSubArrOffsets);
if (output->rankOf() > 1) {
PointersManager manager(context, "gather");
auto xShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrShapeInfo,
shape::shapeInfoByteLength(
inSubArrShapeInfo)));
auto zShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrShapeInfo,
shape::shapeInfoByteLength(
outSubArrShapeInfo)));
auto xOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrOffsets, (input->lengthOf() /
shape::length(
inSubArrShapeInfo)) *
sizeof(Nd4jLong)));
auto zOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrOffsets,
(output->lengthOf() /
shape::length(outSubArrShapeInfo)) *
sizeof(Nd4jLong)));
NDArray::prepareSpecialUse({output}, {input, pIndices});
BUILD_TRIPLE_SELECTOR(input->dataType(), pIndices->dataType(), output->dataType(), gatherCudaLauncher,
(context->getCudaStream(), numOfSubArrs, input->getSpecialBuffer(), xShapeInfo, xOffsets, pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->getSpecialBuffer(), zShapeInfo, zOffsets),
NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
NDArray::registerSpecialUse({output}, {input, pIndices});
manager.synchronize();
}
else {
NDArray::prepareSpecialUse({output}, {input, pIndices});
BUILD_TRIPLE_SELECTOR(input->dataType(), pIndices->dataType(), output->dataType(), gatherCudaLinear,
(context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()),
NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
NDArray::registerSpecialUse({output}, {input, pIndices});
}
if(indices == nullptr)
delete pIndices;
}
}
BUILD_TRIPLE_TEMPLATE(template void gatherCudaLauncher, (const hipStream_t *stream, const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets), NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
BUILD_TRIPLE_TEMPLATE(template void gatherCudaLinear, (const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo), NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
}
}
} | c5bc7c3e01f66ecb30f82361dcfd50d836aa4a63.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 07.03.2019
//
#include <ops/declarable/helpers/gather.h>
#include <numeric>
#include <PointersManager.h>
#include <ShapeUtils.h>
namespace nd4j {
namespace ops {
namespace helpers {
template<typename X, typename Y, typename Z>
__global__ static void gatherCudaLinearKernel(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo) {
__shared__ const X* x;
__shared__ const Y* y;
__shared__ Z* z;
__shared__ Nd4jLong xLen, yLen, zLen;
if (threadIdx.x == 0) {
x = reinterpret_cast<const X*>(vx);
z = reinterpret_cast<Z*>(vz);
y = reinterpret_cast<const Y *>(vy);
xLen = shape::length(xShapeInfo);
yLen = shape::length(yShapeInfo);
zLen = shape::length(zShapeInfo);
}
__syncthreads();
//const Nd4jLong zLen = shape::length(zShapeInfo);
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int j = start; j < zLen; j += step) {
auto zIndex = shape::getIndexOffset(j, zShapeInfo, zLen);
auto yIndex = shape::getIndexOffset(j, yShapeInfo, yLen);
auto xIndex = shape::getIndexOffset(y[yIndex], xShapeInfo, xLen);
//printf("%lld , %lld\n", zIndex, xIndex);
z[zIndex] = x[xIndex];
}
}
//////////////////////////////////////////////////////////////////////
template<typename X, typename Y, typename Z>
__global__ static void gatherCuda(const int numOfSubArrs,
const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) {
const Y* y = reinterpret_cast<const Y*>(vy);
__shared__ const X* x;
__shared__ Z* z;
const Nd4jLong len = shape::length(xShapeInfo);
//const Nd4jLong zLen = shape::length(zShapeInfo);
for (int i = blockIdx.x; i < numOfSubArrs; i += gridDim.x) {
if (threadIdx.x == 0) {
x = reinterpret_cast<const X*>(vx) + xOffsets[y[shape::getIndexOffset(i, yShapeInfo, numOfSubArrs)]];
z = reinterpret_cast<Z*>(vz) + zOffsets[i];
}
__syncthreads();
for (int j = threadIdx.x; j < len; j += blockDim.x) {
auto zIndex = shape::getIndexOffset(j, zShapeInfo, len);
auto xIndex = shape::getIndexOffset(j, xShapeInfo, len);
//printf("%lld , %lld\n", zIndex, xIndex);
z[zIndex] = x[xIndex];
}
__syncthreads();
}
}
template<typename X, typename Y, typename Z>
__host__ static void gatherCudaLinear(const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo) {
gatherCudaLinearKernel<X,Y,Z><<<128, 256, 1024, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
//////////////////////////////////////////////////////////////////////
template<typename X, typename Y, typename Z>
__host__ static void gatherCudaLauncher(const cudaStream_t *stream, const int numOfSubArrs,
const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) {
gatherCuda<X,Y,Z><<<numOfSubArrs, MAX_NUM_THREADS, 1024, *stream>>>(numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, vz, zShapeInfo, zOffsets);
}
//////////////////////////////////////////////////////////////////////
void gather(nd4j::LaunchContext * context, const NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) {
const int inputRank = input->rankOf();
int axis = intArgs.size() > 0 ? intArgs[0] : 0;
if(axis < 0)
axis += inputRank;
const int numOfIntArgs = intArgs.size();
if (indices == nullptr && numOfIntArgs == 2) { // scalar case
output->assign((*input)(intArgs[1], {axis}));
}
else if (indices != nullptr && indices->isScalar()) {
if(input->rankOf() <= 1) { //For scalar indices, rank 0 or 1 input: can't do tensor along dimension 0 as this is whole array... instead, we want to get a scalar
auto idx = indices->e<Nd4jLong>(0);
auto scalarNDArray = input->e(idx);
output->assign(scalarNDArray);
}
else {
NDArray inSubArr = (*input)(indices->e<Nd4jLong>(0), {axis});
output->assign(inSubArr);
}
}
else {
NDArray* pIndices = const_cast<NDArray*>(indices);
if(indices == nullptr)
pIndices = new NDArray(input->ordering(), {numOfIntArgs-1}, std::vector<double>(intArgs.begin() + 1, intArgs.end()), DataType::INT64, input->getContext());
std::vector<int> dimsOut(pIndices->rankOf());
std::iota(dimsOut.begin(), dimsOut.end(), axis); // fill with axis, axis+1, ... axis+pIndices->rankOf()-1
const Nd4jLong numOfSubArrs = pIndices->lengthOf();
Nd4jLong *outSubArrShapeInfo(nullptr), *inSubArrShapeInfo(nullptr), *outSubArrOffsets(nullptr), *inSubArrOffsets(nullptr);
input->getSubArrShapeAndOffsets({axis}, inSubArrShapeInfo, inSubArrOffsets);
output->getSubArrShapeAndOffsets(dimsOut, outSubArrShapeInfo, outSubArrOffsets);
if (output->rankOf() > 1) {
PointersManager manager(context, "gather");
auto xShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrShapeInfo,
shape::shapeInfoByteLength(
inSubArrShapeInfo)));
auto zShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrShapeInfo,
shape::shapeInfoByteLength(
outSubArrShapeInfo)));
auto xOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrOffsets, (input->lengthOf() /
shape::length(
inSubArrShapeInfo)) *
sizeof(Nd4jLong)));
auto zOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrOffsets,
(output->lengthOf() /
shape::length(outSubArrShapeInfo)) *
sizeof(Nd4jLong)));
NDArray::prepareSpecialUse({output}, {input, pIndices});
BUILD_TRIPLE_SELECTOR(input->dataType(), pIndices->dataType(), output->dataType(), gatherCudaLauncher,
(context->getCudaStream(), numOfSubArrs, input->getSpecialBuffer(), xShapeInfo, xOffsets, pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->getSpecialBuffer(), zShapeInfo, zOffsets),
NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
NDArray::registerSpecialUse({output}, {input, pIndices});
manager.synchronize();
}
else {
NDArray::prepareSpecialUse({output}, {input, pIndices});
BUILD_TRIPLE_SELECTOR(input->dataType(), pIndices->dataType(), output->dataType(), gatherCudaLinear,
(context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()),
NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
NDArray::registerSpecialUse({output}, {input, pIndices});
}
if(indices == nullptr)
delete pIndices;
}
}
BUILD_TRIPLE_TEMPLATE(template void gatherCudaLauncher, (const cudaStream_t *stream, const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets), NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
BUILD_TRIPLE_TEMPLATE(template void gatherCudaLinear, (const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo), NUMERIC_TYPES, INTEGER_TYPES, NUMERIC_TYPES);
}
}
} |
f4e4fb800e7c5e3a7065ee79142cdd401151b32b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2019 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <iostream>
#include "planning_block.h"
namespace apollo {
namespace planning {
bool InitialCuda() {
int dev = 0;
hipDeviceProp_t deviceProp;
CUDA_CHECK(hipGetDeviceProperties(&deviceProp, dev));
CUDA_CHECK(hipSetDevice(dev));
return true;
}
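// Element-wise copy of the lower-left sparsity indices. Note the kernel indexes with
// threadIdx.x only, so as written only the first blockDim.x entries are processed
// (blockIdx.x is ignored even though the launch uses a multi-block grid).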
__global__ void fill_lower_left_gpu(int *iRow, int *jCol, unsigned int *rind_L,
unsigned int *cind_L, const int nnz_L) {
int i = threadIdx.x;
if (i < nnz_L) {
iRow[i] = rind_L[i];
jCol[i] = cind_L[i];
}
}
template <typename T>
__global__ void data_transfer_gpu(T *dst, const T *src, const int size) {
int i = threadIdx.x;
if (i < size) {
dst[i] = src[i];
}
}
bool fill_lower_left(int *iRow, int *jCol, unsigned int *rind_L,
unsigned int *cind_L, const int nnz_L) {
if (!InitialCuda()) return false;
int *d_iRow, *d_jCol;
unsigned int *d_rind_L, *d_cind_L;
unsigned int nBytes = nnz_L * sizeof(int);
unsigned int nUBytes = nnz_L * sizeof(unsigned int);
hipMalloc((void **)&d_iRow, nBytes);
hipMalloc((void **)&d_jCol, nBytes);
hipMalloc((void **)&d_rind_L, nUBytes);
hipMalloc((void **)&d_cind_L, nUBytes);
hipMemcpy(d_iRow, iRow, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_jCol, jCol, nBytes, hipMemcpyHostToDevice);
dim3 block(BLOCK_1);
dim3 grid((nnz_L + block.x - 1) / block.x);
hipLaunchKernelGGL(( fill_lower_left_gpu), dim3(grid), dim3(block), 0, 0, d_iRow, d_jCol, d_rind_L, d_cind_L,
nnz_L);
hipDeviceSynchronize();
hipMemcpy(rind_L, d_rind_L, nUBytes, hipMemcpyDeviceToHost);
hipMemcpy(cind_L, d_cind_L, nUBytes, hipMemcpyDeviceToHost);
hipFree(d_iRow);
hipFree(d_jCol);
hipFree(d_rind_L);
hipFree(d_cind_L);
hipDeviceReset();
return true;
}
template <typename T>
bool data_transfer(T *dst, const T *src, const int size) {
if (!InitialCuda()) return false;
T *d_dst, *d_src;
size_t nBytes = size * sizeof(T);
hipMalloc((void **)&d_dst, nBytes);
hipMalloc((void **)&d_src, nBytes);
hipMemcpy(d_src, src, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_dst, dst, nBytes, hipMemcpyHostToDevice);
dim3 block(BLOCK_1);
dim3 grid((size + block.x - 1) / block.x);
hipLaunchKernelGGL(( data_transfer_gpu), dim3(grid), dim3(block), 0, 0, dst, src, size);
hipDeviceSynchronize();
hipMemcpy(dst, d_dst, nBytes, hipMemcpyDeviceToHost);
hipFree(d_dst);
hipFree(d_src);
hipDeviceReset();
return true;
}
DATA_TRANSFER_INST(int);
DATA_TRANSFER_INST(double);
DATA_TRANSFER_INST(float);
} // namespace planning
} // namespace apollo
| f4e4fb800e7c5e3a7065ee79142cdd401151b32b.cu | /******************************************************************************
* Copyright 2019 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <iostream>
#include "planning_block.h"
namespace apollo {
namespace planning {
bool InitialCuda() {
int dev = 0;
cudaDeviceProp deviceProp;
CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, dev));
CUDA_CHECK(cudaSetDevice(dev));
return true;
}
__global__ void fill_lower_left_gpu(int *iRow, int *jCol, unsigned int *rind_L,
unsigned int *cind_L, const int nnz_L) {
int i = threadIdx.x;
if (i < nnz_L) {
iRow[i] = rind_L[i];
jCol[i] = cind_L[i];
}
}
template <typename T>
__global__ void data_transfer_gpu(T *dst, const T *src, const int size) {
int i = threadIdx.x;
if (i < size) {
dst[i] = src[i];
}
}
bool fill_lower_left(int *iRow, int *jCol, unsigned int *rind_L,
unsigned int *cind_L, const int nnz_L) {
if (!InitialCuda()) return false;
int *d_iRow, *d_jCol;
unsigned int *d_rind_L, *d_cind_L;
unsigned int nBytes = nnz_L * sizeof(int);
unsigned int nUBytes = nnz_L * sizeof(unsigned int);
cudaMalloc((void **)&d_iRow, nBytes);
cudaMalloc((void **)&d_jCol, nBytes);
cudaMalloc((void **)&d_rind_L, nUBytes);
cudaMalloc((void **)&d_cind_L, nUBytes);
cudaMemcpy(d_iRow, iRow, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_jCol, jCol, nBytes, cudaMemcpyHostToDevice);
dim3 block(BLOCK_1);
dim3 grid((nnz_L + block.x - 1) / block.x);
fill_lower_left_gpu<<<grid, block>>>(d_iRow, d_jCol, d_rind_L, d_cind_L,
nnz_L);
cudaDeviceSynchronize();
cudaMemcpy(rind_L, d_rind_L, nUBytes, cudaMemcpyDeviceToHost);
cudaMemcpy(cind_L, d_cind_L, nUBytes, cudaMemcpyDeviceToHost);
cudaFree(d_iRow);
cudaFree(d_jCol);
cudaFree(d_rind_L);
cudaFree(d_cind_L);
cudaDeviceReset();
return true;
}
template <typename T>
bool data_transfer(T *dst, const T *src, const int size) {
if (!InitialCuda()) return false;
T *d_dst, *d_src;
size_t nBytes = size * sizeof(T);
cudaMalloc((void **)&d_dst, nBytes);
cudaMalloc((void **)&d_src, nBytes);
cudaMemcpy(d_src, src, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_dst, dst, nBytes, cudaMemcpyHostToDevice);
dim3 block(BLOCK_1);
dim3 grid((size + block.x - 1) / block.x);
data_transfer_gpu<<<grid, block>>>(dst, src, size);
cudaDeviceSynchronize();
cudaMemcpy(dst, d_dst, nBytes, cudaMemcpyDeviceToHost);
cudaFree(d_dst);
cudaFree(d_src);
cudaDeviceReset();
return true;
}
DATA_TRANSFER_INST(int);
DATA_TRANSFER_INST(double);
DATA_TRANSFER_INST(float);
} // namespace planning
} // namespace apollo
|
5ac35964075ef241f12cb92b0bafdc28618a649e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file np_matrix_op.cu
* \brief GPU Implementation of numpy matrix operations
*/
#include "./np_matrix_op-inl.h"
#include "../nn/concat-inl.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(_np_transpose)
.set_attr<FCompute>("FCompute<gpu>", NumpyTranspose<gpu>);
NNVM_REGISTER_OP(_np_reshape)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(_np_squeeze)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(_npi_concatenate)
.set_attr<FCompute>("FCompute<gpu>", ConcatCompute<gpu>);
NNVM_REGISTER_OP(_backward_np_concat)
.set_attr<FCompute>("FCompute<gpu>", ConcatGradCompute<gpu>);
NNVM_REGISTER_OP(_npi_stack)
.set_attr<FCompute>("FCompute<gpu>", StackOpForward<gpu>);
NNVM_REGISTER_OP(_npi_vstack)
.set_attr<FCompute>("FCompute<gpu>", NumpyVstackForward<gpu>);
NNVM_REGISTER_OP(_backward_np_vstack)
.set_attr<FCompute>("FCompute<gpu>", NumpyVstackBackward<gpu>);
NNVM_REGISTER_OP(_np_roll)
.set_attr<FCompute>("FCompute<gpu>", NumpyRollCompute<gpu>);
template<>
void NumpyFlipForwardImpl<gpu>(const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<TBlob>& outputs,
const std::vector<index_t>& stride_,
const std::vector<index_t>& trailing_,
const index_t& flip_index) {
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
mshadow::Tensor<gpu, 1, uint8_t> workspace =
ctx.requested[0].get_space_typed<gpu, 1, uint8_t>(
mshadow::Shape1(flip_index * sizeof(index_t) * 2), s);
auto stride_workspace = workspace.dptr_;
auto trailing_workspace = workspace.dptr_ + flip_index * sizeof(index_t);
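// Stage the per-axis stride/trailing metadata into the GPU workspace asynchronously on
// the op's stream before launching the reverse kernel.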
hipMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
stride_.size() * sizeof(index_t),
hipMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
hipMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
trailing_.size() * sizeof(index_t),
hipMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mxnet_op::Kernel<reverse, gpu>::Launch(s, inputs[0].Size(), flip_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
});
}
NNVM_REGISTER_OP(_npi_flip)
.set_attr<FCompute>("FCompute<gpu>", NumpyFlipForward<gpu>);
NNVM_REGISTER_OP(_backward_npi_flip)
.set_attr<FCompute>("FCompute<gpu>", NumpyFlipForward<gpu>);
} // namespace op
} // namespace mxnet
| 5ac35964075ef241f12cb92b0bafdc28618a649e.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file np_matrix_op.cu
* \brief GPU Implementation of numpy matrix operations
*/
#include "./np_matrix_op-inl.h"
#include "../nn/concat-inl.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(_np_transpose)
.set_attr<FCompute>("FCompute<gpu>", NumpyTranspose<gpu>);
NNVM_REGISTER_OP(_np_reshape)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(_np_squeeze)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(_npi_concatenate)
.set_attr<FCompute>("FCompute<gpu>", ConcatCompute<gpu>);
NNVM_REGISTER_OP(_backward_np_concat)
.set_attr<FCompute>("FCompute<gpu>", ConcatGradCompute<gpu>);
NNVM_REGISTER_OP(_npi_stack)
.set_attr<FCompute>("FCompute<gpu>", StackOpForward<gpu>);
NNVM_REGISTER_OP(_npi_vstack)
.set_attr<FCompute>("FCompute<gpu>", NumpyVstackForward<gpu>);
NNVM_REGISTER_OP(_backward_np_vstack)
.set_attr<FCompute>("FCompute<gpu>", NumpyVstackBackward<gpu>);
NNVM_REGISTER_OP(_np_roll)
.set_attr<FCompute>("FCompute<gpu>", NumpyRollCompute<gpu>);
template<>
void NumpyFlipForwardImpl<gpu>(const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<TBlob>& outputs,
const std::vector<index_t>& stride_,
const std::vector<index_t>& trailing_,
const index_t& flip_index) {
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
mshadow::Tensor<gpu, 1, uint8_t> workspace =
ctx.requested[0].get_space_typed<gpu, 1, uint8_t>(
mshadow::Shape1(flip_index * sizeof(index_t) * 2), s);
auto stride_workspace = workspace.dptr_;
auto trailing_workspace = workspace.dptr_ + flip_index * sizeof(index_t);
cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
stride_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
trailing_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mxnet_op::Kernel<reverse, gpu>::Launch(s, inputs[0].Size(), flip_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
});
}
NNVM_REGISTER_OP(_npi_flip)
.set_attr<FCompute>("FCompute<gpu>", NumpyFlipForward<gpu>);
NNVM_REGISTER_OP(_backward_npi_flip)
.set_attr<FCompute>("FCompute<gpu>", NumpyFlipForward<gpu>);
} // namespace op
} // namespace mxnet
|
03e0d501e5473904bfb44da6c6455d68bfc800f3.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "gist_impl.h"
#include "gist.h"
#include <hip/hip_runtime.h>
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _GistBinarizeEncoderKernel(
const T* input_data,
bool* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = (input_data[id] > (T)0);
}
template <typename T>
__global__ void _GistBinarizeDecoderKernel(
const bool* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = (input_data[id] ? (T)1 : (T)0);
}
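// Pack1: packs `factor` (GIST_PACK1_FACTOR) sign-threshold bits per output byte,
// most-significant bit first.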
template <typename T>
__global__ void _GistPack1EncoderKernel(
const T* input_data,
uint8_t* output_data,
const size_t factor,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); // id of Y (compressed tensor)
uint8_t out = 0x0;
uint8_t bit_out = 0x0;
size_t begin = id * factor;
size_t end = id * factor + factor;
for(size_t idx = begin; idx < end; idx++){
bool bit = (input_data[idx] > (T)0);
int nidxshift = idx % factor;
bit_out = bit ? (0x80 >> nidxshift) : 0;
out |= bit_out;
}
output_data[id] = out;
}
template <typename T>
__global__ void _GistPack1DecoderKernel(
const uint8_t* input_data,
T* output_data,
const size_t factor,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); // id of Y (uncompressed tensor)
int nidx = id / factor;
int nidxshift = id % factor;
uint8_t mask = 0x80 >> nidxshift;
uint8_t in = input_data[nidx] & mask;
output_data[id] = (in > 0) ? (T)1 : (T)0;
}
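// Pack8: stores each value as a 1-bit sign / 5-bit exponent / 2-bit mantissa byte,
// rounding the dropped mantissa bits to nearest; zeros and NaN-range exponents map to 0.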
template <typename T>
__global__ void _GistPack8EncoderKernel(
const T* input_data,
uint8_t* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T X = input_data[id];
if (X == (T)0) {
output_data[id] = (uint8_t)(0);
return;
}
uint32_t i = (uint32_t)__float_as_uint(X);
uint32_t e_mask = 0x7f800000;
uint32_t m_residual_mask = 0x00080000;
uint32_t m_mask = 0x007fffff;
uint32_t m_size = 23;
uint32_t e_size = 8;
uint32_t pack_e_size = 5;
uint32_t pack_m_size = 2;
uint8_t bias = 127;
switch(sizeof(T)){
case 4:
m_size = 23;
e_size = 8;
e_mask = 0x7f800000;
m_mask = 0x007fffff;
m_residual_mask = 0x00080000;
bias = 127;
break;
case 2:
m_size = 10;
e_size = 5;
e_mask = 0x0f800000;
m_mask = 0x000003ff;
m_residual_mask = 0x00000007;
bias = 15;
break;
}
uint32_t pack_e_shift = e_size - pack_e_size;
uint32_t pack_m_shift = m_size - pack_m_size;
uint32_t s = i >> (m_size + e_size);
uint32_t e = i & e_mask;
e >>= (m_size);
e -= bias;
uint32_t m = i & m_mask;
uint32_t pack_e = e >> pack_e_shift;
uint32_t pack_m = m >> pack_m_shift;
uint32_t m_residual = m & m_residual_mask;
if(m_residual > 0){ // round up
if(pack_m == 0x3){
pack_e +=1; // increase exponent
pack_m = 0;
}
else{
pack_m +=1; // increase mantissa
}
}
if (pack_e >= 0x1f) { //NaN values
pack_e = 0;
}
output_data[id] = (s << (pack_e_size + pack_m_size)) | (pack_e << pack_m_size) | pack_m;
}
template <typename T>
__global__ void _GistPack8DecoderKernel(
const uint8_t* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
uint8_t i = input_data[id];
if (i == 0) {
output_data[id] = (T)0;
return;
}
uint32_t pack_e_size = 5;
uint32_t pack_m_size = 2;
uint32_t pack_e_mask = 0x0000007c;
uint32_t pack_m_mask = 0x00000003;
uint32_t m_size = 23;
uint32_t e_size = 8;
uint32_t bias = 127;
switch(sizeof(T)){
case 4:
m_size = 23;
e_size = 8;
bias = 127;
break;
case 2:
m_size = 10;
e_size = 5;
bias = 15;
break;
}
uint32_t pack_e_shift = e_size - pack_e_size;
uint32_t s = i >> (pack_e_size+ pack_m_size);
uint32_t pack_e = i & pack_e_mask;
pack_e >>= pack_m_size;
uint32_t pack_m = i & pack_m_mask;
uint32_t unpack_e = pack_e << (pack_e_shift + m_size);
unpack_e += bias;
uint32_t unpack_m = pack_m << (m_size -pack_m_size);
uint32_t unpack = (s << (m_size+e_size)) | unpack_e | unpack_m;
output_data[id] = (T)__uint_as_float((unsigned int)unpack);
}
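// Pack16: plain float -> half conversion (the decoder converts back).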
template <typename T>
__global__ void _GistPack16EncoderKernel(
const T* input_data,
half* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T X = input_data[id];
output_data[id] = __float2half(X);
}
template <typename T>
__global__ void _GistPack16DecoderKernel(
const half* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
half X = input_data[id];
output_data[id] = (T)__half2float(X);
}
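// MSFP15: each tile shares one 8-bit exponent, distributed bit-by-bit across the MSBs of
// the tile's packed bytes (the decoder recovers it from the first 8); every element keeps
// a sign bit plus a 6-bit mantissa aligned to that shared exponent.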
template <typename T>
__global__ void _GistPackMsfp15EncoderKernel(
const T* input_data,
uint8_t* output_data,
const CUDA_LONG num_threads,
const CUDA_LONG pre_axis_size,
const CUDA_LONG axis_size,
const CUDA_LONG num_tiles,
const CUDA_LONG tile_size) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, num_threads);
// Quantization parameters
const int bits = 7;
// mantissa bits, remove sign
const int m_bits = bits - 1;
// float32 parameters
const uint32_t s_mask = 0x80000000;
const int s_shift = 31;
const int pack_s_shift = 6;
const uint32_t e_mask = 0x7f800000;
const int e_shift = 23;
const int pack_e_shift = 7;
const uint32_t m_mask = 0x007fffff;
const int tile_i = id % num_tiles;
const int pre_axis_i = id / num_tiles;
// Loop over bounding box to find shared exponent
uint32_t shared_exp = 0;
for (size_t i = 0; i < tile_size; i++) {
// Get input
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
T X = input_data[in_i];
uint32_t X_i = (uint32_t)__float_as_uint(X);
// Get exponent
uint32_t exp = (X_i & e_mask) >> e_shift;
// Shared exponent is max of exponents
if (exp > shared_exp) {
shared_exp = exp;
}
}
// If inf/nan is found, zero out values
if (shared_exp >= 0xff) {
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
output_data[in_i] = 0;
}
return;
}
// Copy of shared exponent for packing
uint32_t pack_shared_exp = shared_exp;
// Loop over bounding box to quantize
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
T X = input_data[in_i];
uint32_t X_i = (uint32_t)__float_as_uint(X);
// Get biased exponent
uint32_t exp = (X_i & e_mask) >> e_shift;
uint32_t sign;
uint32_t mantissa;
if (exp == 0) {
// Flush denorm to 0
sign = 0;
mantissa = 0;
} else {
// Decode float
sign = X_i & s_mask;
mantissa = X_i & m_mask;
// Difference in exponents
uint32_t exp_diff = shared_exp - exp;
// Implied 1
mantissa = mantissa + (1 << 23);
// Adjust for shared exponent
mantissa = mantissa >> exp_diff;
// Shift down to target bit width + 1
mantissa = mantissa >> (24 - m_bits - 1);
// Rounding (with overflow check)
if (mantissa != ((1 << (m_bits + 1)) - 1)) {
mantissa += 1;
}
// Shift away last bit
mantissa = mantissa >> 1;
}
// Store {exponent bit, mantissa} in output
uint8_t exp_bit = (pack_shared_exp % 2) << pack_e_shift;
pack_shared_exp = pack_shared_exp >> 1;
output_data[in_i] = (uint8_t) (exp_bit | (sign >> (s_shift - pack_s_shift)) | mantissa);
}
}
template <typename T>
__global__ void _GistPackMsfp15DecoderKernel(
const uint8_t* input_data,
T* output_data,
const CUDA_LONG num_threads,
const CUDA_LONG pre_axis_size,
const CUDA_LONG axis_size,
const CUDA_LONG num_tiles,
const CUDA_LONG tile_size) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, num_threads);
// Quantization parameters
const int bits = 7;
// mantissa bits, remove sign
const int mbits = bits - 1;
const int s_shift = 31;
const int pack_s_shift = 6;
const uint8_t pack_s_mask = 0x40;
const int e_shift = 23;
const int pack_e_shift = 7;
const uint8_t pack_m_mask = 0x3f;
const int tile_i = id % num_tiles;
const int pre_axis_i = id / num_tiles;
// Extract exponent
uint32_t shared_exp = 0;
for (int i = 7; i >= 0; i--) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
shared_exp = shared_exp << 1;
shared_exp += (input_data[in_i] >> pack_e_shift);
}
// De-quantize values
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
uint8_t X = input_data[in_i];
// Get sign bit
uint32_t sign = X & pack_s_mask;
// Get mantissa
uint32_t mantissa = (uint32_t) (X & pack_m_mask);
if (mantissa == 0) {
output_data[in_i] = 0.0;
} else {
// Find leading 1
uint8_t leading_bit_pos = floorf(log2f(mantissa));
// Difference from shared exponent of this value
int exp_diff = 5 - leading_bit_pos;
// Adjust exponent
uint32_t exp = shared_exp - exp_diff;
// Shift back to restore mantissa
mantissa = mantissa << (24 - mbits + exp_diff);
// Remove implied 1
mantissa = mantissa & ((1 << 23) - 1);
// Reconstruct float number
uint32_t output = (sign << (s_shift - pack_s_shift)) | (exp << e_shift) | mantissa;
output_data[in_i] = (float)__uint_as_float(output);
}
}
}
template <typename T>
void GistBinarizeEncoderImpl(
hipStream_t stream,
const T* input_data,
bool* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistBinarizeEncoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistBinarizeDecoderImpl(
hipStream_t stream,
const bool* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistBinarizeDecoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack1EncoderImpl(
hipStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipMemset(output_data, 0, N);
hipLaunchKernelGGL(( _GistPack1EncoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, GIST_PACK1_FACTOR, (CUDA_LONG)N);
}
template <typename T>
void GistPack1DecoderImpl(
hipStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistPack1DecoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, GIST_PACK1_FACTOR, (CUDA_LONG)N);
}
template <typename T>
void GistPack8EncoderImpl(
hipStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistPack8EncoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack8DecoderImpl(
hipStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistPack8DecoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack16EncoderImpl(
hipStream_t stream,
const T* input_data,
half* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistPack16EncoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack16DecoderImpl(
hipStream_t stream,
const half* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistPack16DecoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPackMsfp15EncoderImpl(
hipStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size) {
assert(axis_size % tile_size == 0);
const int num_tiles = static_cast<int>(axis_size / tile_size);
const int threads = static_cast<int>(pre_axis_size * num_tiles);
int blocksPerGrid = (int)(ceil(static_cast<float>(threads) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistPackMsfp15EncoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
input_data,
output_data,
(CUDA_LONG)threads,
(CUDA_LONG)pre_axis_size,
(CUDA_LONG)axis_size,
(CUDA_LONG)num_tiles,
(CUDA_LONG)tile_size
);
}
template <typename T>
void GistPackMsfp15DecoderImpl(
hipStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size) {
assert(axis_size % tile_size == 0);
const int num_tiles = static_cast<int>(axis_size / tile_size);
const int threads = static_cast<int>(pre_axis_size * num_tiles);
int blocksPerGrid = (int)(ceil(static_cast<float>(threads) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GistPackMsfp15DecoderKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
input_data,
output_data,
(CUDA_LONG)threads,
(CUDA_LONG)pre_axis_size,
(CUDA_LONG)axis_size,
(CUDA_LONG)num_tiles,
(CUDA_LONG)tile_size
);
}
#define SPECIALIZED_IMPL_BIN_ENC(T) \
template void GistBinarizeEncoderImpl<T>(hipStream_t stream, const T* input_data, bool* output_data, const size_t N);
#define SPECIALIZED_IMPL_BIN_DEC(T) \
template void GistBinarizeDecoderImpl<T>(hipStream_t stream, const bool* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK1_ENC(T) \
template void GistPack1EncoderImpl<T>(hipStream_t stream, const T* input_data, uint8_t* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK1_DEC(T) \
template void GistPack1DecoderImpl<T>(hipStream_t stream, const uint8_t* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK8_ENC(T) \
template void GistPack8EncoderImpl<T>(hipStream_t stream, const T* input_data, uint8_t* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK8_DEC(T) \
template void GistPack8DecoderImpl<T>(hipStream_t stream, const uint8_t* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK16_ENC(T) \
template void GistPack16EncoderImpl<T>(hipStream_t stream, const T* input_data, half* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK16_DEC(T) \
template void GistPack16DecoderImpl<T>(hipStream_t stream, const half* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACKMSFP15_ENC(T) \
template void GistPackMsfp15EncoderImpl<T>(hipStream_t stream, const T* input_data, uint8_t* output_data, const size_t pre_axis_size, const size_t axis_size, const size_t tile_size);
#define SPECIALIZED_IMPL_PACKMSFP15_DEC(T) \
template void GistPackMsfp15DecoderImpl<T>(hipStream_t stream, const uint8_t* input_data, T* output_data, const size_t pre_axis_size, const size_t axis_size, const size_t tile_size);
SPECIALIZED_IMPL_BIN_ENC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_BIN_ENC(half)
#endif
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_IMPL_BIN_ENC(double)
#endif
SPECIALIZED_IMPL_BIN_DEC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_BIN_DEC(half)
#endif
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_IMPL_BIN_DEC(double)
#endif
SPECIALIZED_IMPL_PACK1_ENC(bool)
SPECIALIZED_IMPL_PACK1_ENC(float)
SPECIALIZED_IMPL_PACK1_DEC(bool)
SPECIALIZED_IMPL_PACK1_DEC(float)
SPECIALIZED_IMPL_PACK8_ENC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_PACK8_ENC(half)
#endif
SPECIALIZED_IMPL_PACK8_DEC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_PACK8_DEC(half)
#endif
SPECIALIZED_IMPL_PACK16_ENC(float)
SPECIALIZED_IMPL_PACK16_DEC(float)
SPECIALIZED_IMPL_PACKMSFP15_ENC(float)
SPECIALIZED_IMPL_PACKMSFP15_DEC(float)
} // namespace cuda
} // namespace onnxruntime
| 03e0d501e5473904bfb44da6c6455d68bfc800f3.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "gist_impl.h"
#include "gist.h"
#include <cuda_runtime.h>
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _GistBinarizeEncoderKernel(
const T* input_data,
bool* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = (input_data[id] > (T)0);
}
template <typename T>
__global__ void _GistBinarizeDecoderKernel(
const bool* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = (input_data[id] ? (T)1 : (T)0);
}
template <typename T>
__global__ void _GistPack1EncoderKernel(
const T* input_data,
uint8_t* output_data,
const size_t factor,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); // id of Y (compressed tensor)
uint8_t out = 0x0;
uint8_t bit_out = 0x0;
size_t begin = id * factor;
size_t end = id * factor + factor;
for(size_t idx = begin; idx < end; idx++){
bool bit = (input_data[idx] > (T)0);
int nidxshift = idx % factor;
bit_out = bit ? (0x80 >> nidxshift) : 0;
out |= bit_out;
}
output_data[id] = out;
}
template <typename T>
__global__ void _GistPack1DecoderKernel(
const uint8_t* input_data,
T* output_data,
const size_t factor,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); // id of Y (uncompressed tensor)
int nidx = id / factor;
int nidxshift = id % factor;
uint8_t mask = 0x80 >> nidxshift;
uint8_t in = input_data[nidx] & mask;
output_data[id] = (in > 0) ? (T)1 : (T)0;
}
template <typename T>
__global__ void _GistPack8EncoderKernel(
const T* input_data,
uint8_t* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T X = input_data[id];
if (X == (T)0) {
output_data[id] = (uint8_t)(0);
return;
}
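  // Packed 8-bit layout: sign in bit 7, 5-bit exponent field in bits 6-2,
  // 2-bit rounded mantissa in bits 1-0.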
uint32_t i = (uint32_t)__float_as_uint(X);
uint32_t e_mask = 0x7f800000;
uint32_t m_residual_mask = 0x00080000;
uint32_t m_mask = 0x007fffff;
uint32_t m_size = 23;
uint32_t e_size = 8;
uint32_t pack_e_size = 5;
uint32_t pack_m_size = 2;
uint8_t bias = 127;
switch(sizeof(T)){
case 4:
m_size = 23;
e_size = 8;
e_mask = 0x7f800000;
m_mask = 0x007fffff;
m_residual_mask = 0x00080000;
bias = 127;
break;
case 2:
m_size = 10;
e_size = 5;
e_mask = 0x0f800000;
m_mask = 0x000003ff;
m_residual_mask = 0x00000007;
bias = 15;
break;
}
uint32_t pack_e_shift = e_size - pack_e_size;
uint32_t pack_m_shift = m_size - pack_m_size;
uint32_t s = i >> (m_size + e_size);
uint32_t e = i & e_mask;
e >>= (m_size);
e -= bias;
uint32_t m = i & m_mask;
uint32_t pack_e = e >> pack_e_shift;
uint32_t pack_m = m >> pack_m_shift;
uint32_t m_residual = m & m_residual_mask;
if(m_residual > 0){ // round up
if(pack_m == 0x3){
pack_e +=1; // increase exponent
pack_m = 0;
}
else{
pack_m +=1; // increase mantissa
}
}
if (pack_e >= 0x1f) { //NaN values
pack_e = 0;
}
output_data[id] = (s << (pack_e_size + pack_m_size)) | (pack_e << pack_m_size) | pack_m;
}
template <typename T>
__global__ void _GistPack8DecoderKernel(
const uint8_t* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
uint8_t i = input_data[id];
if (i == 0) {
output_data[id] = (T)0;
return;
}
uint32_t pack_e_size = 5;
uint32_t pack_m_size = 2;
uint32_t pack_e_mask = 0x0000007c;
uint32_t pack_m_mask = 0x00000003;
uint32_t m_size = 23;
uint32_t e_size = 8;
uint32_t bias = 127;
switch(sizeof(T)){
case 4:
m_size = 23;
e_size = 8;
bias = 127;
break;
case 2:
m_size = 10;
e_size = 5;
bias = 15;
break;
}
uint32_t pack_e_shift = e_size - pack_e_size;
uint32_t s = i >> (pack_e_size+ pack_m_size);
uint32_t pack_e = i & pack_e_mask;
pack_e >>= pack_m_size;
uint32_t pack_m = i & pack_m_mask;
uint32_t unpack_e = pack_e << (pack_e_shift + m_size);
unpack_e += bias;
uint32_t unpack_m = pack_m << (m_size -pack_m_size);
uint32_t unpack = (s << (m_size+e_size)) | unpack_e | unpack_m;
output_data[id] = (T)__uint_as_float((unsigned int)unpack);
}
template <typename T>
__global__ void _GistPack16EncoderKernel(
const T* input_data,
half* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T X = input_data[id];
output_data[id] = __float2half(X);
}
template <typename T>
__global__ void _GistPack16DecoderKernel(
const half* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
half X = input_data[id];
output_data[id] = (T)__half2float(X);
}
template <typename T>
__global__ void _GistPackMsfp15EncoderKernel(
const T* input_data,
uint8_t* output_data,
const CUDA_LONG num_threads,
const CUDA_LONG pre_axis_size,
const CUDA_LONG axis_size,
const CUDA_LONG num_tiles,
const CUDA_LONG tile_size) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, num_threads);
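  // Packed byte layout: bit 7 carries one bit of the tile's shared exponent, bit 6 the sign,
  // bits 5-0 the mantissa; the 8-bit shared exponent is spread one bit per byte over the
  // first 8 elements of each tile (element 0 holds its LSB).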
// Quantization parameters
const int bits = 7;
// mantissa bits, remove sign
const int m_bits = bits - 1;
// float32 parameters
const uint32_t s_mask = 0x80000000;
const int s_shift = 31;
const int pack_s_shift = 6;
const uint32_t e_mask = 0x7f800000;
const int e_shift = 23;
const int pack_e_shift = 7;
const uint32_t m_mask = 0x007fffff;
const int tile_i = id % num_tiles;
const int pre_axis_i = id / num_tiles;
// Loop over bounding box to find shared exponent
uint32_t shared_exp = 0;
for (size_t i = 0; i < tile_size; i++) {
// Get input
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
T X = input_data[in_i];
uint32_t X_i = (uint32_t)__float_as_uint(X);
// Get exponent
uint32_t exp = (X_i & e_mask) >> e_shift;
// Shared exponent is max of exponents
if (exp > shared_exp) {
shared_exp = exp;
}
}
// If inf/nan is found, zero out values
if (shared_exp >= 0xff) {
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
output_data[in_i] = 0;
}
return;
}
// Copy of shared exponent for packing
uint32_t pack_shared_exp = shared_exp;
// Loop over bounding box to quantize
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
T X = input_data[in_i];
uint32_t X_i = (uint32_t)__float_as_uint(X);
// Get biased exponent
uint32_t exp = (X_i & e_mask) >> e_shift;
uint32_t sign;
uint32_t mantissa;
if (exp == 0) {
// Flush denorm to 0
sign = 0;
mantissa = 0;
} else {
// Decode float
sign = X_i & s_mask;
mantissa = X_i & m_mask;
// Difference in exponents
uint32_t exp_diff = shared_exp - exp;
// Implied 1
mantissa = mantissa + (1 << 23);
// Adjust for shared exponent
mantissa = mantissa >> exp_diff;
// Shift down to target bit width + 1
mantissa = mantissa >> (24 - m_bits - 1);
// Rounding (with overflow check)
if (mantissa != ((1 << (m_bits + 1)) - 1)) {
mantissa += 1;
}
// Shift away last bit
mantissa = mantissa >> 1;
}
  // Store {shared exponent bit, sign bit, mantissa} in output
uint8_t exp_bit = (pack_shared_exp % 2) << pack_e_shift;
pack_shared_exp = pack_shared_exp >> 1;
output_data[in_i] = (uint8_t) (exp_bit | (sign >> (s_shift - pack_s_shift)) | mantissa);
}
}
template <typename T>
__global__ void _GistPackMsfp15DecoderKernel(
const uint8_t* input_data,
T* output_data,
const CUDA_LONG num_threads,
const CUDA_LONG pre_axis_size,
const CUDA_LONG axis_size,
const CUDA_LONG num_tiles,
const CUDA_LONG tile_size) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, num_threads);
// Quantization parameters
const int bits = 7;
// mantissa bits, remove sign
const int mbits = bits - 1;
const int s_shift = 31;
const int pack_s_shift = 6;
const uint8_t pack_s_mask = 0x40;
const int e_shift = 23;
const int pack_e_shift = 7;
const uint8_t pack_m_mask = 0x3f;
const int tile_i = id % num_tiles;
const int pre_axis_i = id / num_tiles;
// Extract exponent
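  // Each of the first 8 packed bytes of the tile carries one exponent bit in its MSB
  // (element 0 = LSB, element 7 = MSB), so tiles are assumed to hold at least 8 values.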
uint32_t shared_exp = 0;
for (int i = 7; i >= 0; i--) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
shared_exp = shared_exp << 1;
shared_exp += (input_data[in_i] >> pack_e_shift);
}
// De-quantize values
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
uint8_t X = input_data[in_i];
// Get sign bit
uint32_t sign = X & pack_s_mask;
// Get mantissa
uint32_t mantissa = (uint32_t) (X & pack_m_mask);
if (mantissa == 0) {
output_data[in_i] = 0.0;
} else {
// Find leading 1
uint8_t leading_bit_pos = floorf(log2f(mantissa));
// Difference from shared exponent of this value
int exp_diff = 5 - leading_bit_pos;
// Adjust exponent
uint32_t exp = shared_exp - exp_diff;
// Shift back to restore mantissa
mantissa = mantissa << (24 - mbits + exp_diff);
// Remove implied 1
mantissa = mantissa & ((1 << 23) - 1);
// Reconstruct float number
uint32_t output = (sign << (s_shift - pack_s_shift)) | (exp << e_shift) | mantissa;
output_data[in_i] = (float)__uint_as_float(output);
}
}
}
template <typename T>
void GistBinarizeEncoderImpl(
cudaStream_t stream,
const T* input_data,
bool* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistBinarizeEncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistBinarizeDecoderImpl(
cudaStream_t stream,
const bool* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistBinarizeDecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack1EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
cudaMemset(output_data, 0, N);
_GistPack1EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, GIST_PACK1_FACTOR, (CUDA_LONG)N);
}
template <typename T>
void GistPack1DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack1DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, GIST_PACK1_FACTOR, (CUDA_LONG)N);
}
template <typename T>
void GistPack8EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack8EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack8DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack8DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack16EncoderImpl(
cudaStream_t stream,
const T* input_data,
half* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack16EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack16DecoderImpl(
cudaStream_t stream,
const half* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack16DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPackMsfp15EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size) {
assert(axis_size % tile_size == 0);
const int num_tiles = static_cast<int>(axis_size / tile_size);
const int threads = static_cast<int>(pre_axis_size * num_tiles);
int blocksPerGrid = (int)(ceil(static_cast<float>(threads) / GridDim::maxThreadsPerBlock));
_GistPackMsfp15EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
output_data,
(CUDA_LONG)threads,
(CUDA_LONG)pre_axis_size,
(CUDA_LONG)axis_size,
(CUDA_LONG)num_tiles,
(CUDA_LONG)tile_size
);
}
template <typename T>
void GistPackMsfp15DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size) {
assert(axis_size % tile_size == 0);
const int num_tiles = static_cast<int>(axis_size / tile_size);
const int threads = static_cast<int>(pre_axis_size * num_tiles);
int blocksPerGrid = (int)(ceil(static_cast<float>(threads) / GridDim::maxThreadsPerBlock));
_GistPackMsfp15DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
output_data,
(CUDA_LONG)threads,
(CUDA_LONG)pre_axis_size,
(CUDA_LONG)axis_size,
(CUDA_LONG)num_tiles,
(CUDA_LONG)tile_size
);
}
#define SPECIALIZED_IMPL_BIN_ENC(T) \
template void GistBinarizeEncoderImpl<T>(cudaStream_t stream, const T* input_data, bool* output_data, const size_t N);
#define SPECIALIZED_IMPL_BIN_DEC(T) \
template void GistBinarizeDecoderImpl<T>(cudaStream_t stream, const bool* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK1_ENC(T) \
template void GistPack1EncoderImpl<T>(cudaStream_t stream, const T* input_data, uint8_t* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK1_DEC(T) \
template void GistPack1DecoderImpl<T>(cudaStream_t stream, const uint8_t* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK8_ENC(T) \
template void GistPack8EncoderImpl<T>(cudaStream_t stream, const T* input_data, uint8_t* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK8_DEC(T) \
template void GistPack8DecoderImpl<T>(cudaStream_t stream, const uint8_t* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK16_ENC(T) \
template void GistPack16EncoderImpl<T>(cudaStream_t stream, const T* input_data, half* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK16_DEC(T) \
template void GistPack16DecoderImpl<T>(cudaStream_t stream, const half* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACKMSFP15_ENC(T) \
template void GistPackMsfp15EncoderImpl<T>(cudaStream_t stream, const T* input_data, uint8_t* output_data, const size_t pre_axis_size, const size_t axis_size, const size_t tile_size);
#define SPECIALIZED_IMPL_PACKMSFP15_DEC(T) \
template void GistPackMsfp15DecoderImpl<T>(cudaStream_t stream, const uint8_t* input_data, T* output_data, const size_t pre_axis_size, const size_t axis_size, const size_t tile_size);
SPECIALIZED_IMPL_BIN_ENC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_BIN_ENC(half)
#endif
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_IMPL_BIN_ENC(double)
#endif
SPECIALIZED_IMPL_BIN_DEC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_BIN_DEC(half)
#endif
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_IMPL_BIN_DEC(double)
#endif
SPECIALIZED_IMPL_PACK1_ENC(bool)
SPECIALIZED_IMPL_PACK1_ENC(float)
SPECIALIZED_IMPL_PACK1_DEC(bool)
SPECIALIZED_IMPL_PACK1_DEC(float)
SPECIALIZED_IMPL_PACK8_ENC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_PACK8_ENC(half)
#endif
SPECIALIZED_IMPL_PACK8_DEC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_PACK8_DEC(half)
#endif
SPECIALIZED_IMPL_PACK16_ENC(float)
SPECIALIZED_IMPL_PACK16_DEC(float)
SPECIALIZED_IMPL_PACKMSFP15_ENC(float)
SPECIALIZED_IMPL_PACKMSFP15_DEC(float)
} // namespace cuda
} // namespace onnxruntime
|
af5cd3f8ad9da04dfe4d637d5b7d243cd27cef56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file particle_bpf_gpu.cu
* @author Matthew Nicely ([email protected])
* @date 2020-01-06
* @version 1.0
* @brief Contains CUDA functions for parallel version of
* the bootstrap particle filter
*
* @copyright Copyright (c) 2020
*
* @license This project is released under the GNU Public License
*
* @note * Target Processor: Intel x86
* @n * Target Compiler: GCC 7.4.0
* @n * NVCC Compiler: CUDA Toolkit 10.0 or later
*/
#include <hip/hip_cooperative_groups.h> // cooperative_groups::this_thread_block, cooperative_groups::tiled_partition
#include <hipcub/hipcub.hpp> // cub::CacheModifiedInputIterator, cub::BlockLoad, cub::BlockStore, hipcub::WarpReduce
#include <hiprand/hiprand_kernel.h> // hiprand_init, hiprand_normal, hiprand_uniform, hiprandStateXORWOW_t
#include "models.h"
namespace cg = cooperative_groups;
constexpr auto kSysDim { utility::kSysDim }; // state dimension
constexpr auto kMeasDim { utility::kMeasDim }; // measurement dimension
constexpr auto kMetropolisB { 32 }; // Iterations in Metropolis resampling
constexpr auto kWarpSize { 32 };
constexpr auto kBlocks { 20 };
constexpr auto kTAI { 256 };
constexpr auto kTME { 256 };
constexpr auto kTCE { 256 };
constexpr auto kTPT { 256 };
constexpr auto kTRI { 64 };
constexpr auto kTMR { 256 };
namespace filters {
__constant__ float c_initial_state[kSysDim] {};
__constant__ float c_meas_update[kMeasDim] {};
__constant__ float c_inv_meas_noise_cov[kMeasDim * kMeasDim] {};
__constant__ float c_process_noise_cov[kSysDim * kSysDim] {};
__constant__ float c_initial_noise_cov[kSysDim * kSysDim] {};
__device__ float d_sum_of_particle_weights {};
template<typename T>
__global__ void __launch_bounds__( kTAI )
InitializeFilter( int const num_particles, unsigned long long int const seed, T *__restrict__ particle_state_new ) {
auto const block { cg::this_thread_block( ) };
typedef cub::CacheModifiedInputIterator<cub::LOAD_LDG, T> InputItr;
typedef cub::BlockLoad<T, kTAI, kSysDim, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
typedef cub::BlockStore<T, kTAI, kSysDim, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
__shared__ union TempStorage {
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
} temp_storage;
unsigned int loop = blockIdx.x * blockDim.x * kSysDim;
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
T thread_data[kSysDim] {};
T random_nums[kSysDim] {};
hiprandState_t local_state {};
hiprand_init( static_cast<unsigned long long int>( seed + tid ), 0, 0, &local_state );
#pragma unroll kSysDim
for ( T &x : random_nums ) {
x = hiprand_normal( &local_state );
}
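        // Draw initial particles: x = c_initial_state + c_initial_noise_cov * z with z ~ N(0, I);
        // the constant holds the square-root factor of the initial covariance (pin_sq_initial_noise_cov).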
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] = c_initial_state[i];
#pragma unroll kSysDim
for ( int j = 0; j < kSysDim; j++ ) {
thread_data[i] += c_initial_noise_cov[i * kSysDim + j] * random_nums[j];
}
}
BlockStore( temp_storage.store ).Store( particle_state_new + loop, thread_data );
block.sync( );
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
template<typename T>
__global__ void __launch_bounds__( kTME ) ComputeMeasErrors( int const num_particles,
T const *__restrict__ particle_state_new,
T *__restrict__ particle_weights,
T *__restrict__ particle_state ) {
auto const block { cg::this_thread_block( ) };
/*
* Sum particle weights using BlockReduce in this kernel
* to save on global memory loads later
* Note that d_sum_of_particle_weights is reset to zero in
* void ComputeParticleTransitionCuda
*/
typedef cub::CacheModifiedInputIterator<cub::LOAD_LDG, T> InputItr;
typedef cub::BlockLoad<T, kTME, kSysDim, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
typedef cub::BlockStore<T, kTME, kSysDim, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
typedef hipcub::BlockReduce<T, kTME> BlockReduce;
__shared__ union TempStorage {
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
typename BlockReduce::TempStorage reduce;
} temp_storage;
unsigned int loop { blockIdx.x * blockDim.x * kSysDim };
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
T thread_data[kSysDim] {};
T estimates[kMeasDim] {};
T errors[kMeasDim] {};
BlockLoad( temp_storage.load ).Load( InputItr( particle_state_new + loop ), thread_data );
block.sync( );
models::MeasModelMath( thread_data, estimates );
T sum {};
#pragma unroll kMeasDim
for ( int i = 0; i < kMeasDim; i++ ) {
errors[i] = c_meas_update[i] - estimates[i];
errors[i] *= errors[i];
}
#pragma unroll kMeasDim
for ( int i = 0; i < kMeasDim; i++ ) {
#pragma unroll kMeasDim
for ( int j = 0; j < kMeasDim; j++ ) {
sum += c_inv_meas_noise_cov[i * kMeasDim + j] * errors[j];
}
}
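        // Unnormalized importance weight: w = exp(-0.5 * sum_ij Rinv[i][j] * e_j^2),
        // i.e. squared measurement errors weighted by the inverse measurement-noise covariance.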
float particle_weight { expf( sum * -0.5f ) };
particle_weights[tid] = particle_weight;
float blockSum { BlockReduce( temp_storage.reduce ).Sum( particle_weight ) };
block.sync( );
if ( threadIdx.x == 0 ) {
atomicAdd( &d_sum_of_particle_weights, blockSum );
}
BlockStore( temp_storage.store ).Store( particle_state + loop, thread_data );
block.sync( );
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
template<typename T>
__global__ void __launch_bounds__( kTCE ) ComputeEstimates( int const num_particles,
int const time_step,
int const resampling_method,
T const *__restrict__ particle_state,
T *__restrict__ filtered_estimates,
T *__restrict__ particle_weights ) {
auto const block = cg::this_thread_block( );
auto const tile_32 = cg::tiled_partition( block, 32 );
auto const laneID = tile_32.thread_rank( );
typedef cub::CacheModifiedInputIterator<cub::LOAD_LDG, T> InputItr;
typedef cub::BlockLoad<T, kTCE, kSysDim, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
typedef hipcub::WarpReduce<T> WarpReduce;
__shared__ union TempStorage {
typename BlockLoad::TempStorage load;
typename WarpReduce::TempStorage warpReduce[kWarpSize];
} temp_storage;
__shared__ T s_partial_reduce[kWarpSize]; // kWarpSize is 32. Allows for 32
// warps (1024 threads)
__shared__ T s_final_reduce[kSysDim];
unsigned int const warp_id { threadIdx.x >> 5 };
unsigned int loop { blockIdx.x * blockDim.x * kSysDim };
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
if ( warp_id == 0 ) {
s_partial_reduce[laneID] = 0; // Initialize shared memory
}
T thread_data[kSysDim] {};
T val {};
T normalized { particle_weights[tid] / d_sum_of_particle_weights };
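        // The filtered estimate is the weighted mean sum_p w_p * x_p, built with a warp-level
        // reduction per state, a block-level combine in shared memory, and a final atomicAdd.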
// Load a segment of consecutive items that are blocked across threads
BlockLoad( temp_storage.load ).Load( InputItr( particle_state + loop ), thread_data );
block.sync( );
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] *= normalized;
// Each warp perform reduction
val = WarpReduce( temp_storage.warpReduce[warp_id] ).Sum( thread_data[i] );
// Write reduced value to shared memory
if ( laneID == 0 ) {
s_partial_reduce[warp_id] = val;
}
block.sync( ); // Wait for all partial reductions
// Read from shared memory only if that warp existed
if ( warp_id == 0 ) {
val = WarpReduce( temp_storage.warpReduce[0] ).Sum( s_partial_reduce[laneID] );
}
if ( threadIdx.x == 0 ) {
s_final_reduce[i] = val;
}
block.sync( ); // Wait for final reduction
}
/*
         * For systematic and stratified resampling, normalized weights are
         * needed. To save on global memory loads in future kernels, particle_weights
         * is normalized and written back to global memory.
         * For Metropolis, we normalize for the filter estimates but don't write the
         * normalized weights back to global memory.
*/
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
particle_weights[tid] = normalized;
}
if ( threadIdx.x < kSysDim ) {
atomicAdd( &filtered_estimates[time_step * kSysDim + laneID], s_final_reduce[laneID] );
}
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
template<typename T>
__device__ void ResamplingUpPerWarp( cg::thread_block_tile<kWarpSize> const &tile_32,
unsigned int const & tid,
int const & num_particles,
T const & distro,
T * shared,
T *__restrict__ prefix_sum,
int *__restrict__ resampling_index_up ) {
T const tidf { static_cast<T>( tid ) };
auto const t { tile_32.thread_rank( ) };
int l {};
int idx {};
T a {};
T b {};
bool mask { true };
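    // Count how many cumulative-weight entries at or above this thread's index lie below its
    // draw; that count is the upward shift of the resampling index. Two warp-sized tiles of
    // the prefix sum are kept in shared memory and refreshed each pass.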
if ( tid < num_particles - kWarpSize - l ) {
shared[t] = prefix_sum[tid + l];
shared[t + kWarpSize] = prefix_sum[tid + kWarpSize + l];
}
// Distribution will be the same for each Monte Carlo
T const draw = ( distro + tidf ) / num_particles;
tile_32.sync( );
while ( tile_32.any( mask ) ) {
if ( tid < num_particles - ( kTRI )-l ) {
a = prefix_sum[tid + kWarpSize + l];
b = prefix_sum[tid + kTRI + l];
#pragma unroll kWarpSize
for ( int i = 0; i < kWarpSize; i++ ) {
mask = shared[t + i] < draw;
if ( mask ) {
idx++;
}
}
l += kWarpSize;
shared[t] = a;
shared[t + kWarpSize] = b;
tile_32.sync( );
} else {
while ( mask && tid < ( num_particles - l ) ) {
mask = prefix_sum[tid + l] < draw;
if ( mask ) {
idx++;
}
l++;
}
}
tile_32.sync( );
}
resampling_index_up[tid] = idx;
}
template<typename T>
__global__ void __launch_bounds__( kTRI )
ComputeResampleIndexSysUpSharedPrefetch64( int const num_particles,
unsigned long long int const seed,
int const resampling_method,
int *__restrict__ resampling_index_up,
T *__restrict__ prefix_sum ) {
auto const tile_32 = cg::tiled_partition<kWarpSize>( cg::this_thread_block( ) );
__shared__ T s_warp_0[kTRI];
__shared__ T s_warp_1[kTRI];
    // Setting prefix_sum[n - 1] in each block versus calling a separate kernel
    // beforehand. Set the last value of the prefix sum to 1.0f
if ( threadIdx.x == 0 ) {
            prefix_sum[num_particles - 1] = 1.0f;
}
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
hiprandStateXORWOW_t local_state {};
T distro {};
if ( resampling_method == static_cast<int>( utility::Method::kSystematic ) ) {
hiprand_init( seed, 0, 0, &local_state );
distro = hiprand_uniform( &local_state );
} else if ( resampling_method == static_cast<int>( utility::Method::kStratified ) ) {
hiprand_init( seed + tid, 0, 0, &local_state );
distro = hiprand_uniform( &local_state );
}
if ( threadIdx.x < kWarpSize ) {
ResamplingUpPerWarp( tile_32, tid, num_particles, distro, s_warp_0, prefix_sum, resampling_index_up );
} else {
ResamplingUpPerWarp( tile_32, tid, num_particles, distro, s_warp_1, prefix_sum, resampling_index_up );
}
}
}
template<typename T>
__device__ void ResamplingDownPerWarp( cg::thread_block_tile<kWarpSize> const &tile_32,
unsigned int const & tid,
int const & num_particles,
T const & distro,
T * shared,
T *__restrict__ prefix_sum,
int *__restrict__ resampling_index_down ) {
T const tidf { static_cast<T>( tid ) };
auto const t { tile_32.thread_rank( ) };
int l {};
int idx {};
T a {};
T b {};
bool mask { false };
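    // Count (as a negative offset) how many cumulative-weight entries below this thread's
    // index are still >= its draw; that count is the downward shift of the resampling index.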
    // Preload into shared memory
if ( tid >= kWarpSize + l ) {
shared[t] = prefix_sum[tid - kWarpSize - l];
shared[t + kWarpSize] = prefix_sum[tid - l];
}
// Distribution will be the same for each Monte Carlo
T const draw { ( distro + tidf ) / num_particles };
tile_32.sync( );
while ( !tile_32.all( mask ) ) {
if ( tid >= kTRI + l ) {
a = prefix_sum[tid - ( kTRI )-l];
b = prefix_sum[tid - kWarpSize - l];
#pragma unroll
for ( int i = 1; i < kWarpSize + 1; i++ ) {
mask = shared[t + kWarpSize - i] < draw;
if ( !mask ) {
idx--;
}
}
l += kWarpSize;
shared[t] = a;
shared[t + kWarpSize] = b;
tile_32.sync( );
} else {
while ( !mask ) {
if ( tid > l ) {
mask = prefix_sum[tid - ( l + 1 )] < draw;
} else {
mask = true;
}
if ( !mask ) {
idx--;
}
l++;
}
}
tile_32.sync( );
}
resampling_index_down[tid] = idx;
}
template<typename T>
__global__ void __launch_bounds__( kTRI )
ComputeResampleIndexSysDownSharedPrefetch64( int const num_particles,
unsigned long long int const seed,
int const resampling_method,
int *__restrict__ resampling_index_down,
T *__restrict__ prefix_sum ) {
auto const tile_32 = cg::tiled_partition<kWarpSize>( cg::this_thread_block( ) );
__shared__ T s_warp_0[kTRI];
__shared__ T s_warp_1[kTRI];
    // Setting prefix_sum_particle_weights[n - 1] in each block versus calling a
    // separate kernel beforehand
if ( threadIdx.x == 0 ) {
prefix_sum[num_particles - 1] = 1.0f;
}
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
hiprandStateXORWOW_t local_state {};
T distro {};
if ( resampling_method == static_cast<int>( utility::Method::kSystematic ) ) {
hiprand_init( seed, 0, 0, &local_state );
distro = hiprand_uniform( &local_state );
} else if ( resampling_method == static_cast<int>( utility::Method::kStratified ) ) {
hiprand_init( seed + tid, 0, 0, &local_state );
distro = hiprand_uniform( &local_state );
}
if ( threadIdx.x < kWarpSize ) {
ResamplingDownPerWarp( tile_32, tid, num_particles, distro, s_warp_0, prefix_sum, resampling_index_down );
} else {
ResamplingDownPerWarp( tile_32, tid, num_particles, distro, s_warp_1, prefix_sum, resampling_index_down );
}
}
}
template<typename T>
__global__ void __launch_bounds__( kTMR ) ComputeResampleIndexMetropolisC2( int const num_particles,
unsigned long long int const seed,
T const *__restrict__ particle_weights,
int *__restrict__ resampling_index_down ) {
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
unsigned int idx { tid };
unsigned int key {};
unsigned int warp {};
T den { particle_weights[tid] };
T num {};
T random_num {};
hiprandStateXORWOW_t local_state {};
hiprandStateXORWOW_t rand_state {};
unsigned long long int local_seed { static_cast<unsigned long long int>( tid >> 5 ) + seed };
        // Same random sequence for every thread in a warp (the seed is derived from the global warp index)
hiprand_init( local_seed, 0, 0, &local_state );
hiprand_init( local_seed, 0, 0, &rand_state );
// Calculate s(warp) using warp index. Threads in warp have same value
int ss { kWarpSize }; // Size of segment
int sc { num_particles / ss }; // The number of segments
        int dc { ss }; // Number of particles (draws) per segment
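        // Metropolis-C2 acceptance loop: propose a particle from a randomly chosen warp-sized
        // segment and accept it with probability min(1, w_proposed / w_current); after
        // kMetropolisB iterations the surviving index is the resampled ancestor.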
for ( int i = 0; i < kMetropolisB; i++ ) {
warp = static_cast<unsigned int>( hiprand_uniform( &local_state ) *
( sc - 1 ) ); // Random number [0 -> number of warps]
random_num = hiprand_uniform( &rand_state );
key = static_cast<unsigned int>( hiprand_uniform( &rand_state ) * ( dc - 1 ) );
key = warp * dc + key;
num = particle_weights[key];
if ( random_num <= ( num / den ) ) {
den = num;
idx = key;
}
}
resampling_index_down[tid] = idx;
}
}
template<typename T>
__global__ void __launch_bounds__( kTPT ) ComputeParticleTransition( int const num_particles,
unsigned long long int const seed,
int const resampling_method,
int const *__restrict__ resampling_index_up,
int const *__restrict__ resampling_index_down,
T const *__restrict__ particle_state,
T *__restrict__ particle_state_new ) {
auto const block { cg::this_thread_block( ) };
typedef cub::BlockStore<T, kTPT, kSysDim, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
__shared__ typename BlockStore::TempStorage temp_storage;
unsigned int loop { blockIdx.x * blockDim.x * kSysDim };
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
int idx {};
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
idx = static_cast<int>( tid ) + resampling_index_up[tid] + resampling_index_down[tid];
} else {
idx = resampling_index_down[tid];
}
T model_update[kSysDim] {};
T thread_data[kSysDim] {};
T random_nums[kSysDim] {};
hiprandState_t local_state {};
hiprand_init( static_cast<unsigned long long int>( seed + tid ), 0, 0, &local_state );
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] = particle_state[idx * kSysDim + i];
random_nums[i] = hiprand_normal( &local_state );
}
models::SysModelMath( thread_data, model_update );
// Reuse thread_data to ease register pressure
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] = model_update[i];
#pragma unroll kSysDim
for ( int j = 0; j < kSysDim; j++ ) {
thread_data[i] += c_process_noise_cov[i * kSysDim + j] * random_nums[j];
}
}
BlockStore( temp_storage ).Store( particle_state_new + loop, thread_data );
block.sync( );
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
// Wrappers
template<typename T>
void InitializeFilterCuda( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
T const * pin_sq_initial_noise_cov,
hipEvent_t * events,
T * particle_state_new ) {
int const threads_per_block { kTAI };
int const blocks_per_grid { kBlocks * sm_count };
unsigned long long int seed { static_cast<unsigned long long int>( clock( ) ) };
CUDA_RT_CALL( hipMemcpyToSymbolAsync( c_initial_noise_cov,
pin_sq_initial_noise_cov,
kSysDim * kSysDim * sizeof( T ),
0,
hipMemcpyHostToDevice,
streams[0] ) );
void *args[] { const_cast<int *>( &num_particles ), &seed, &particle_state_new };
CUDA_RT_CALL( cudaLaunchKernel(
reinterpret_cast<void *>( &InitializeFilter<T> ), blocks_per_grid, threads_per_block, args, 0, streams[0] ) );
}
template<typename T>
void ComputeMeasErrorsCuda( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
T const * pin_inv_meas_noise_cov,
T const * pin_meas_update,
T const * particle_state_new,
hipEvent_t * events,
T * particle_weights,
T * particle_state ) {
int const threads_per_block { kTME };
int const blocks_per_grid { kBlocks * sm_count };
CUDA_RT_CALL( hipMemcpyToSymbolAsync( c_inv_meas_noise_cov,
pin_inv_meas_noise_cov,
kMeasDim * kMeasDim * sizeof( T ),
0,
hipMemcpyHostToDevice,
streams[1] ) );
CUDA_RT_CALL( hipEventRecord( events[1], streams[1] ) );
CUDA_RT_CALL( hipMemcpyToSymbolAsync(
c_meas_update, pin_meas_update, kMeasDim * sizeof( T ), 0, hipMemcpyHostToDevice, streams[0] ) );
// Wait for hipMemcpyToSymbolAsync -> c_inv_meas_noise_cov
CUDA_RT_CALL( hipStreamWaitEvent( streams[0], events[1], 0 ) );
void *args[] { const_cast<int *>( &num_particles ), &particle_state_new, &particle_weights, &particle_state };
CUDA_RT_CALL( cudaLaunchKernel(
reinterpret_cast<void *>( &ComputeMeasErrors<T> ), blocks_per_grid, threads_per_block, args, 0, streams[0] ) );
}
template<typename T>
void ComputeEstimatesCuda( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
T const * particle_state,
hipEvent_t * events,
T * filtered_estimates,
T * particle_weights ) {
int const threads_per_block { kTCE };
int const blocks_per_grid { kBlocks * sm_count };
void *args[] { const_cast<int *>( &num_particles ),
const_cast<int *>( &time_step ),
const_cast<int *>( &resampling_method ),
&particle_state,
&filtered_estimates,
&particle_weights };
CUDA_RT_CALL( cudaLaunchKernel(
reinterpret_cast<void *>( &ComputeEstimates<T> ), blocks_per_grid, threads_per_block, args, 0, streams[0] ) );
}
template<typename T>
void ComputeResampleIndexCuda( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
T const * particle_weights,
T * prefix_sum_particle_weights,
hipEvent_t * events,
int * resampling_index_up,
int * resampling_index_down ) {
unsigned long long int seed { static_cast<unsigned long long int>( clock( ) ) };
// If Systematic and Stratified
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
int const threads_per_block { kTRI };
int blocks_per_grid {};
if ( num_particles > 100000 ) {
blocks_per_grid = 2 * kBlocks * sm_count;
} // Better performance with more blocks
else {
blocks_per_grid = kBlocks * sm_count;
} // Better performance with fewer blocks
//*********************** Perform Cumulative Sum
//***************************
void * d_temp_storage { nullptr };
size_t temp_storage_bytes {};
// Determine temporary device storage requirements for inclusive prefix
// sum on normalized particleWeights
hipcub::DeviceScan::InclusiveSum( d_temp_storage,
temp_storage_bytes,
particle_weights,
prefix_sum_particle_weights,
num_particles,
streams[0],
false );
// Allocate temporary storage
CUDA_RT_CALL( hipMalloc( &d_temp_storage, temp_storage_bytes ) );
// Run inclusive prefix sum
hipcub::DeviceScan::InclusiveSum( d_temp_storage,
temp_storage_bytes,
particle_weights,
prefix_sum_particle_weights,
num_particles,
streams[0],
false );
// Sync cumulative sum
CUDA_RT_CALL( hipEventRecord( events[1], streams[0] ) );
void *args_up[] { const_cast<int *>( &num_particles ),
&seed,
const_cast<int *>( &resampling_method ),
&resampling_index_up,
&prefix_sum_particle_weights };
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeResampleIndexSysUpSharedPrefetch64<T> ),
blocks_per_grid,
threads_per_block,
args_up,
0,
streams[0] ) );
CUDA_RT_CALL( hipStreamWaitEvent( streams[1], events[1], 0 ) ); // Wait for InclusiveSum
void *args_down[] { const_cast<int *>( &num_particles ),
&seed,
const_cast<int *>( &resampling_method ),
&resampling_index_down,
&prefix_sum_particle_weights };
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeResampleIndexSysDownSharedPrefetch64<T> ),
blocks_per_grid,
threads_per_block,
args_down,
0,
streams[1] ) );
CUDA_RT_CALL( hipEventRecord( events[0], streams[1] ) );
} else {
int const threads_per_block { kTMR };
int const blocks_per_grid { kBlocks * sm_count };
void *args[] { const_cast<int *>( &num_particles ), &seed, &particle_weights, &resampling_index_down };
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeResampleIndexMetropolisC2<T> ),
blocks_per_grid,
threads_per_block,
args,
0,
streams[0] ) );
CUDA_RT_CALL( hipEventRecord( events[0], streams[0] ) );
}
}
template<typename T>
void ComputeParticleTransitionCuda( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
int const & resampling_method,
T const * pin_sq_process_noise_cov,
T const * particle_state,
int const * resampling_index_up,
int const * resampling_index_down,
hipEvent_t * events,
T * particle_state_new ) {
// Get d_sum_of_particle_weights address for reset
float *h_sum_of_particle_weights;
CUDA_RT_CALL( hipGetSymbolAddress( ( void ** )&h_sum_of_particle_weights, d_sum_of_particle_weights ) );
unsigned long long int seed { static_cast<unsigned long long int>( clock( ) ) };
int const threads_per_block { kTPT };
int const blocks_per_grid { kBlocks * sm_count };
CUDA_RT_CALL( hipMemcpyToSymbolAsync( c_process_noise_cov,
pin_sq_process_noise_cov,
kSysDim * kSysDim * sizeof( T ),
0,
hipMemcpyHostToDevice,
streams[0] ) );
void *args[] { const_cast<int *>( &num_particles ),
&seed,
const_cast<int *>( &resampling_method ),
&resampling_index_up,
&resampling_index_down,
&particle_state,
&particle_state_new };
// Systematic and Stratified must wait on
// ComputeResampleIndexSysDownSharedPrefetch64
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
CUDA_RT_CALL( hipStreamWaitEvent( streams[0], events[0], 0 ) );
} // Wait for ComputeResampleIndexSysDownSharedPrefetch64
else {
CUDA_RT_CALL( hipStreamWaitEvent( streams[1], events[0], 0 ) );
} // Wait for ComputeResampleIndexMetropolisC2
// Reset d_sum_of_particle_weights before next time step
// If Metropolis, make sure it's not reset before ComputeEstimates is
// finished
CUDA_RT_CALL( hipMemsetAsync( h_sum_of_particle_weights, 0, sizeof( T ), streams[1] ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeParticleTransition<T> ),
blocks_per_grid,
threads_per_block,
args,
0,
streams[0] ) );
}
// Explicit specializations needed to generate code
template void InitializeFilterCuda<float>( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
float const * pin_sq_initial_noise_cov,
hipEvent_t * events,
float * particle_state_new );
template void ComputeMeasErrorsCuda<float>( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
float const * pin_inv_meas_noise_cov,
float const * pin_meas_update,
float const * particle_state_new,
hipEvent_t * events,
float * particle_weights,
float * particle_state );
template void ComputeEstimatesCuda<float>( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
float const * particle_state,
hipEvent_t * events,
float * filtered_estimates,
float * particle_weights );
template void ComputeResampleIndexCuda<float>( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
float const * particle_weights,
float * prefix_sum_particle_weights,
hipEvent_t * events,
int * resampling_index_up,
int * resampling_index_down );
template void ComputeParticleTransitionCuda<float>( int const & sm_count,
hipStream_t const *streams,
int const & num_particles,
int const & resampling_method,
float const * pin_sq_process_noise_cov,
float const * particle_state,
int const * resampling_index_up,
int const * resampling_index_down,
hipEvent_t * events,
float * particle_state_new );
} /* namespace filters */
| af5cd3f8ad9da04dfe4d637d5b7d243cd27cef56.cu | /**
* @file particle_bpf_gpu.cu
* @author Matthew Nicely ([email protected])
* @date 2020-01-06
* @version 1.0
* @brief Contains CUDA functions for parallel version of
* the bootstrap particle filter
*
* @copyright Copyright (c) 2020
*
* @license This project is released under the GNU Public License
*
* @note * Target Processor: Intel x86
* @n * Target Compiler: GCC 7.4.0
* @n * NVCC Compiler: CUDA Toolkit 10.0 or later
*/
#include <cooperative_groups.h> // cooperative_groups::this_thread_block, cooperative_groups::tiled_partition
#include <cub/cub.cuh> // cub::CacheModifiedInputIterator, cub::BlockLoad, cub::BlockStore, cub::WarpReduce
#include <curand_kernel.h> // curand_init, curand_normal, curand_uniform, curandStateXORWOW_t
#include "models.h"
namespace cg = cooperative_groups;
constexpr auto kSysDim { utility::kSysDim }; // state dimension
constexpr auto kMeasDim { utility::kMeasDim }; // measurement dimension
constexpr auto kMetropolisB { 32 }; // Iterations in Metropolis resampling
constexpr auto kWarpSize { 32 };
constexpr auto kBlocks { 20 };
constexpr auto kTAI { 256 };
constexpr auto kTME { 256 };
constexpr auto kTCE { 256 };
constexpr auto kTPT { 256 };
constexpr auto kTRI { 64 };
constexpr auto kTMR { 256 };
namespace filters {
__constant__ float c_initial_state[kSysDim] {};
__constant__ float c_meas_update[kMeasDim] {};
__constant__ float c_inv_meas_noise_cov[kMeasDim * kMeasDim] {};
__constant__ float c_process_noise_cov[kSysDim * kSysDim] {};
__constant__ float c_initial_noise_cov[kSysDim * kSysDim] {};
__device__ float d_sum_of_particle_weights {};
template<typename T>
__global__ void __launch_bounds__( kTAI )
InitializeFilter( int const num_particles, unsigned long long int const seed, T *__restrict__ particle_state_new ) {
auto const block { cg::this_thread_block( ) };
typedef cub::CacheModifiedInputIterator<cub::LOAD_LDG, T> InputItr;
typedef cub::BlockLoad<T, kTAI, kSysDim, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
typedef cub::BlockStore<T, kTAI, kSysDim, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
__shared__ union TempStorage {
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
} temp_storage;
unsigned int loop = blockIdx.x * blockDim.x * kSysDim;
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
T thread_data[kSysDim] {};
T random_nums[kSysDim] {};
curandState local_state {};
curand_init( static_cast<unsigned long long int>( seed + tid ), 0, 0, &local_state );
#pragma unroll kSysDim
for ( T &x : random_nums ) {
x = curand_normal( &local_state );
}
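        // Draw initial particles: x = c_initial_state + c_initial_noise_cov * z with z ~ N(0, I);
        // the constant holds the square-root factor of the initial covariance (pin_sq_initial_noise_cov).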
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] = c_initial_state[i];
#pragma unroll kSysDim
for ( int j = 0; j < kSysDim; j++ ) {
thread_data[i] += c_initial_noise_cov[i * kSysDim + j] * random_nums[j];
}
}
BlockStore( temp_storage.store ).Store( particle_state_new + loop, thread_data );
block.sync( );
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
template<typename T>
__global__ void __launch_bounds__( kTME ) ComputeMeasErrors( int const num_particles,
T const *__restrict__ particle_state_new,
T *__restrict__ particle_weights,
T *__restrict__ particle_state ) {
auto const block { cg::this_thread_block( ) };
/*
* Sum particle weights using BlockReduce in this kernel
* to save on global memory loads later
* Note that d_sum_of_particle_weights is reset to zero in
* void ComputeParticleTransitionCuda
*/
typedef cub::CacheModifiedInputIterator<cub::LOAD_LDG, T> InputItr;
typedef cub::BlockLoad<T, kTME, kSysDim, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
typedef cub::BlockStore<T, kTME, kSysDim, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
typedef cub::BlockReduce<T, kTME> BlockReduce;
__shared__ union TempStorage {
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
typename BlockReduce::TempStorage reduce;
} temp_storage;
unsigned int loop { blockIdx.x * blockDim.x * kSysDim };
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
T thread_data[kSysDim] {};
T estimates[kMeasDim] {};
T errors[kMeasDim] {};
BlockLoad( temp_storage.load ).Load( InputItr( particle_state_new + loop ), thread_data );
block.sync( );
models::MeasModelMath( thread_data, estimates );
T sum {};
#pragma unroll kMeasDim
for ( int i = 0; i < kMeasDim; i++ ) {
errors[i] = c_meas_update[i] - estimates[i];
errors[i] *= errors[i];
}
#pragma unroll kMeasDim
for ( int i = 0; i < kMeasDim; i++ ) {
#pragma unroll kMeasDim
for ( int j = 0; j < kMeasDim; j++ ) {
sum += c_inv_meas_noise_cov[i * kMeasDim + j] * errors[j];
}
}
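        // Unnormalized importance weight: w = exp(-0.5 * sum_ij Rinv[i][j] * e_j^2),
        // i.e. squared measurement errors weighted by the inverse measurement-noise covariance.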
float particle_weight { expf( sum * -0.5f ) };
particle_weights[tid] = particle_weight;
float blockSum { BlockReduce( temp_storage.reduce ).Sum( particle_weight ) };
block.sync( );
if ( threadIdx.x == 0 ) {
atomicAdd( &d_sum_of_particle_weights, blockSum );
}
BlockStore( temp_storage.store ).Store( particle_state + loop, thread_data );
block.sync( );
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
template<typename T>
__global__ void __launch_bounds__( kTCE ) ComputeEstimates( int const num_particles,
int const time_step,
int const resampling_method,
T const *__restrict__ particle_state,
T *__restrict__ filtered_estimates,
T *__restrict__ particle_weights ) {
auto const block = cg::this_thread_block( );
auto const tile_32 = cg::tiled_partition( block, 32 );
auto const laneID = tile_32.thread_rank( );
typedef cub::CacheModifiedInputIterator<cub::LOAD_LDG, T> InputItr;
typedef cub::BlockLoad<T, kTCE, kSysDim, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
typedef cub::WarpReduce<T> WarpReduce;
__shared__ union TempStorage {
typename BlockLoad::TempStorage load;
typename WarpReduce::TempStorage warpReduce[kWarpSize];
} temp_storage;
__shared__ T s_partial_reduce[kWarpSize]; // kWarpSize is 32. Allows for 32
// warps (1024 threads)
__shared__ T s_final_reduce[kSysDim];
unsigned int const warp_id { threadIdx.x >> 5 };
unsigned int loop { blockIdx.x * blockDim.x * kSysDim };
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
if ( warp_id == 0 ) {
s_partial_reduce[laneID] = 0; // Initialize shared memory
}
T thread_data[kSysDim] {};
T val {};
T normalized { particle_weights[tid] / d_sum_of_particle_weights };
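        // The filtered estimate is the weighted mean sum_p w_p * x_p, built with a warp-level
        // reduction per state, a block-level combine in shared memory, and a final atomicAdd.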
// Load a segment of consecutive items that are blocked across threads
BlockLoad( temp_storage.load ).Load( InputItr( particle_state + loop ), thread_data );
block.sync( );
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] *= normalized;
// Each warp perform reduction
val = WarpReduce( temp_storage.warpReduce[warp_id] ).Sum( thread_data[i] );
// Write reduced value to shared memory
if ( laneID == 0 ) {
s_partial_reduce[warp_id] = val;
}
block.sync( ); // Wait for all partial reductions
// Read from shared memory only if that warp existed
if ( warp_id == 0 ) {
val = WarpReduce( temp_storage.warpReduce[0] ).Sum( s_partial_reduce[laneID] );
}
if ( threadIdx.x == 0 ) {
s_final_reduce[i] = val;
}
block.sync( ); // Wait for final reduction
}
/*
         * For systematic and stratified resampling, normalized weights are
         * needed. To save on global memory loads in future kernels, particle_weights
         * is normalized and written back to global memory.
         * For Metropolis, we normalize for the filter estimates but don't write the
         * normalized weights back to global memory.
*/
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
particle_weights[tid] = normalized;
}
if ( threadIdx.x < kSysDim ) {
atomicAdd( &filtered_estimates[time_step * kSysDim + laneID], s_final_reduce[laneID] );
}
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
template<typename T>
__device__ void ResamplingUpPerWarp( cg::thread_block_tile<kWarpSize> const &tile_32,
unsigned int const & tid,
int const & num_particles,
T const & distro,
T * shared,
T *__restrict__ prefix_sum,
int *__restrict__ resampling_index_up ) {
T const tidf { static_cast<T>( tid ) };
auto const t { tile_32.thread_rank( ) };
int l {};
int idx {};
T a {};
T b {};
bool mask { true };
if ( tid < num_particles - kWarpSize - l ) {
shared[t] = prefix_sum[tid + l];
shared[t + kWarpSize] = prefix_sum[tid + kWarpSize + l];
}
// Distribution will be the same for each Monte Carlo
T const draw = ( distro + tidf ) / num_particles;
tile_32.sync( );
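    // Count how many prefix-sum (CDF) entries from this thread's position lie below its draw; the count becomes an index offset used during resampling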
while ( tile_32.any( mask ) ) {
if ( tid < num_particles - ( kTRI )-l ) {
a = prefix_sum[tid + kWarpSize + l];
b = prefix_sum[tid + kTRI + l];
#pragma unroll kWarpSize
for ( int i = 0; i < kWarpSize; i++ ) {
mask = shared[t + i] < draw;
if ( mask ) {
idx++;
}
}
l += kWarpSize;
shared[t] = a;
shared[t + kWarpSize] = b;
tile_32.sync( );
} else {
while ( mask && tid < ( num_particles - l ) ) {
mask = prefix_sum[tid + l] < draw;
if ( mask ) {
idx++;
}
l++;
}
}
tile_32.sync( );
}
resampling_index_up[tid] = idx;
}
template<typename T>
__global__ void __launch_bounds__( kTRI )
ComputeResampleIndexSysUpSharedPrefetch64( int const num_particles,
unsigned long long int const seed,
int const resampling_method,
int *__restrict__ resampling_index_up,
T *__restrict__ prefix_sum ) {
auto const tile_32 = cg::tiled_partition<kWarpSize>( cg::this_thread_block( ) );
__shared__ T s_warp_0[kTRI];
__shared__ T s_warp_1[kTRI];
    // Set prefix_sum[n - 1] in each block rather than calling a separate kernel
    // beforehand. The last value of the prefix sum is forced to 1.0f.
    if ( threadIdx.x == 0 ) {
        prefix_sum[num_particles - 1] = 1.0f;
}
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
curandStateXORWOW_t local_state {};
T distro {};
if ( resampling_method == static_cast<int>( utility::Method::kSystematic ) ) {
curand_init( seed, 0, 0, &local_state );
distro = curand_uniform( &local_state );
} else if ( resampling_method == static_cast<int>( utility::Method::kStratified ) ) {
curand_init( seed + tid, 0, 0, &local_state );
distro = curand_uniform( &local_state );
}
if ( threadIdx.x < kWarpSize ) {
ResamplingUpPerWarp( tile_32, tid, num_particles, distro, s_warp_0, prefix_sum, resampling_index_up );
} else {
ResamplingUpPerWarp( tile_32, tid, num_particles, distro, s_warp_1, prefix_sum, resampling_index_up );
}
}
}
template<typename T>
__device__ void ResamplingDownPerWarp( cg::thread_block_tile<kWarpSize> const &tile_32,
unsigned int const & tid,
int const & num_particles,
T const & distro,
T * shared,
T *__restrict__ prefix_sum,
int *__restrict__ resampling_index_down ) {
T const tidf { static_cast<T>( tid ) };
auto const t { tile_32.thread_rank( ) };
int l {};
int idx {};
T a {};
T b {};
bool mask { false };
    // Preload into shared memory
if ( tid >= kWarpSize + l ) {
shared[t] = prefix_sum[tid - kWarpSize - l];
shared[t + kWarpSize] = prefix_sum[tid - l];
}
// Distribution will be the same for each Monte Carlo
T const draw { ( distro + tidf ) / num_particles };
tile_32.sync( );
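    // Walk the prefix sum backwards from this thread's position, decrementing the index for every entry at or above the draw; the negative count shifts the resampled index down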
while ( !tile_32.all( mask ) ) {
if ( tid >= kTRI + l ) {
a = prefix_sum[tid - ( kTRI )-l];
b = prefix_sum[tid - kWarpSize - l];
#pragma unroll
for ( int i = 1; i < kWarpSize + 1; i++ ) {
mask = shared[t + kWarpSize - i] < draw;
if ( !mask ) {
idx--;
}
}
l += kWarpSize;
shared[t] = a;
shared[t + kWarpSize] = b;
tile_32.sync( );
} else {
while ( !mask ) {
if ( tid > l ) {
mask = prefix_sum[tid - ( l + 1 )] < draw;
} else {
mask = true;
}
if ( !mask ) {
idx--;
}
l++;
}
}
tile_32.sync( );
}
resampling_index_down[tid] = idx;
}
template<typename T>
__global__ void __launch_bounds__( kTRI )
ComputeResampleIndexSysDownSharedPrefetch64( int const num_particles,
unsigned long long int const seed,
int const resampling_method,
int *__restrict__ resampling_index_down,
T *__restrict__ prefix_sum ) {
auto const tile_32 = cg::tiled_partition<kWarpSize>( cg::this_thread_block( ) );
__shared__ T s_warp_0[kTRI];
__shared__ T s_warp_1[kTRI];
    // Set prefix_sum_particle_weights[n - 1] in each block rather than calling
    // a separate kernel beforehand
if ( threadIdx.x == 0 ) {
prefix_sum[num_particles - 1] = 1.0f;
}
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
curandStateXORWOW_t local_state {};
T distro {};
if ( resampling_method == static_cast<int>( utility::Method::kSystematic ) ) {
curand_init( seed, 0, 0, &local_state );
distro = curand_uniform( &local_state );
} else if ( resampling_method == static_cast<int>( utility::Method::kStratified ) ) {
curand_init( seed + tid, 0, 0, &local_state );
distro = curand_uniform( &local_state );
}
if ( threadIdx.x < kWarpSize ) {
ResamplingDownPerWarp( tile_32, tid, num_particles, distro, s_warp_0, prefix_sum, resampling_index_down );
} else {
ResamplingDownPerWarp( tile_32, tid, num_particles, distro, s_warp_1, prefix_sum, resampling_index_down );
}
}
}
template<typename T>
__global__ void __launch_bounds__( kTMR ) ComputeResampleIndexMetropolisC2( int const num_particles,
unsigned long long int const seed,
T const *__restrict__ particle_weights,
int *__restrict__ resampling_index_down ) {
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
unsigned int idx { tid };
unsigned int key {};
unsigned int warp {};
T den { particle_weights[tid] };
T num {};
T random_num {};
curandStateXORWOW_t local_state {};
curandStateXORWOW_t rand_state {};
unsigned long long int local_seed { static_cast<unsigned long long int>( tid >> 5 ) + seed };
        // All threads in a warp share the same seed (grid-wide warp index), so they draw identical random numbers
curand_init( local_seed, 0, 0, &local_state );
curand_init( local_seed, 0, 0, &rand_state );
// Calculate s(warp) using warp index. Threads in warp have same value
int ss { kWarpSize }; // Size of segment
int sc { num_particles / ss }; // The number of segments
int dc { ss };
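        // Metropolis acceptance loop: accept candidate index 'key' with probability min(1, w_key / w_current)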
for ( int i = 0; i < kMetropolisB; i++ ) {
warp = static_cast<unsigned int>( curand_uniform( &local_state ) *
( sc - 1 ) ); // Random number [0 -> number of warps]
random_num = curand_uniform( &rand_state );
key = static_cast<unsigned int>( curand_uniform( &rand_state ) * ( dc - 1 ) );
key = warp * dc + key;
num = particle_weights[key];
if ( random_num <= ( num / den ) ) {
den = num;
idx = key;
}
}
resampling_index_down[tid] = idx;
}
}
template<typename T>
__global__ void __launch_bounds__( kTPT ) ComputeParticleTransition( int const num_particles,
unsigned long long int const seed,
int const resampling_method,
int const *__restrict__ resampling_index_up,
int const *__restrict__ resampling_index_down,
T const *__restrict__ particle_state,
T *__restrict__ particle_state_new ) {
auto const block { cg::this_thread_block( ) };
typedef cub::BlockStore<T, kTPT, kSysDim, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
__shared__ typename BlockStore::TempStorage temp_storage;
unsigned int loop { blockIdx.x * blockDim.x * kSysDim };
for ( unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_particles;
tid += blockDim.x * gridDim.x ) {
int idx {};
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
idx = static_cast<int>( tid ) + resampling_index_up[tid] + resampling_index_down[tid];
} else {
idx = resampling_index_down[tid];
}
T model_update[kSysDim] {};
T thread_data[kSysDim] {};
T random_nums[kSysDim] {};
curandState local_state {};
curand_init( static_cast<unsigned long long int>( seed + tid ), 0, 0, &local_state );
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] = particle_state[idx * kSysDim + i];
random_nums[i] = curand_normal( &local_state );
}
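        // Propagate the resampled particle through the system model; Gaussian process noise is added below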
models::SysModelMath( thread_data, model_update );
// Reuse thread_data to ease register pressure
#pragma unroll kSysDim
for ( int i = 0; i < kSysDim; i++ ) {
thread_data[i] = model_update[i];
#pragma unroll kSysDim
for ( int j = 0; j < kSysDim; j++ ) {
thread_data[i] += c_process_noise_cov[i * kSysDim + j] * random_nums[j];
}
}
BlockStore( temp_storage ).Store( particle_state_new + loop, thread_data );
block.sync( );
// grid size * number of system states
loop += blockDim.x * gridDim.x * kSysDim;
}
}
// Wrappers
template<typename T>
void InitializeFilterCuda( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
T const * pin_sq_initial_noise_cov,
cudaEvent_t * events,
T * particle_state_new ) {
int const threads_per_block { kTAI };
int const blocks_per_grid { kBlocks * sm_count };
unsigned long long int seed { static_cast<unsigned long long int>( clock( ) ) };
CUDA_RT_CALL( cudaMemcpyToSymbolAsync( c_initial_noise_cov,
pin_sq_initial_noise_cov,
kSysDim * kSysDim * sizeof( T ),
0,
cudaMemcpyHostToDevice,
streams[0] ) );
void *args[] { const_cast<int *>( &num_particles ), &seed, &particle_state_new };
CUDA_RT_CALL( cudaLaunchKernel(
reinterpret_cast<void *>( &InitializeFilter<T> ), blocks_per_grid, threads_per_block, args, 0, streams[0] ) );
}
template<typename T>
void ComputeMeasErrorsCuda( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
T const * pin_inv_meas_noise_cov,
T const * pin_meas_update,
T const * particle_state_new,
cudaEvent_t * events,
T * particle_weights,
T * particle_state ) {
int const threads_per_block { kTME };
int const blocks_per_grid { kBlocks * sm_count };
CUDA_RT_CALL( cudaMemcpyToSymbolAsync( c_inv_meas_noise_cov,
pin_inv_meas_noise_cov,
kMeasDim * kMeasDim * sizeof( T ),
0,
cudaMemcpyHostToDevice,
streams[1] ) );
CUDA_RT_CALL( cudaEventRecord( events[1], streams[1] ) );
CUDA_RT_CALL( cudaMemcpyToSymbolAsync(
c_meas_update, pin_meas_update, kMeasDim * sizeof( T ), 0, cudaMemcpyHostToDevice, streams[0] ) );
// Wait for cudaMemcpyToSymbolAsync -> c_inv_meas_noise_cov
CUDA_RT_CALL( cudaStreamWaitEvent( streams[0], events[1], 0 ) );
void *args[] { const_cast<int *>( &num_particles ), &particle_state_new, &particle_weights, &particle_state };
CUDA_RT_CALL( cudaLaunchKernel(
reinterpret_cast<void *>( &ComputeMeasErrors<T> ), blocks_per_grid, threads_per_block, args, 0, streams[0] ) );
}
template<typename T>
void ComputeEstimatesCuda( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
T const * particle_state,
cudaEvent_t * events,
T * filtered_estimates,
T * particle_weights ) {
int const threads_per_block { kTCE };
int const blocks_per_grid { kBlocks * sm_count };
void *args[] { const_cast<int *>( &num_particles ),
const_cast<int *>( &time_step ),
const_cast<int *>( &resampling_method ),
&particle_state,
&filtered_estimates,
&particle_weights };
CUDA_RT_CALL( cudaLaunchKernel(
reinterpret_cast<void *>( &ComputeEstimates<T> ), blocks_per_grid, threads_per_block, args, 0, streams[0] ) );
}
template<typename T>
void ComputeResampleIndexCuda( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
T const * particle_weights,
T * prefix_sum_particle_weights,
cudaEvent_t * events,
int * resampling_index_up,
int * resampling_index_down ) {
unsigned long long int seed { static_cast<unsigned long long int>( clock( ) ) };
// If Systematic and Stratified
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
int const threads_per_block { kTRI };
int blocks_per_grid {};
if ( num_particles > 100000 ) {
blocks_per_grid = 2 * kBlocks * sm_count;
} // Better performance with more blocks
else {
blocks_per_grid = kBlocks * sm_count;
} // Better performance with fewer blocks
//*********************** Perform Cumulative Sum
//***************************
void * d_temp_storage { nullptr };
size_t temp_storage_bytes {};
// Determine temporary device storage requirements for inclusive prefix
// sum on normalized particleWeights
cub::DeviceScan::InclusiveSum( d_temp_storage,
temp_storage_bytes,
particle_weights,
prefix_sum_particle_weights,
num_particles,
streams[0],
false );
// Allocate temporary storage
CUDA_RT_CALL( cudaMalloc( &d_temp_storage, temp_storage_bytes ) );
// Run inclusive prefix sum
cub::DeviceScan::InclusiveSum( d_temp_storage,
temp_storage_bytes,
particle_weights,
prefix_sum_particle_weights,
num_particles,
streams[0],
false );
// Sync cumulative sum
CUDA_RT_CALL( cudaEventRecord( events[1], streams[0] ) );
void *args_up[] { const_cast<int *>( &num_particles ),
&seed,
const_cast<int *>( &resampling_method ),
&resampling_index_up,
&prefix_sum_particle_weights };
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeResampleIndexSysUpSharedPrefetch64<T> ),
blocks_per_grid,
threads_per_block,
args_up,
0,
streams[0] ) );
CUDA_RT_CALL( cudaStreamWaitEvent( streams[1], events[1], 0 ) ); // Wait for InclusiveSum
void *args_down[] { const_cast<int *>( &num_particles ),
&seed,
const_cast<int *>( &resampling_method ),
&resampling_index_down,
&prefix_sum_particle_weights };
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeResampleIndexSysDownSharedPrefetch64<T> ),
blocks_per_grid,
threads_per_block,
args_down,
0,
streams[1] ) );
CUDA_RT_CALL( cudaEventRecord( events[0], streams[1] ) );
} else {
int const threads_per_block { kTMR };
int const blocks_per_grid { kBlocks * sm_count };
void *args[] { const_cast<int *>( &num_particles ), &seed, &particle_weights, &resampling_index_down };
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeResampleIndexMetropolisC2<T> ),
blocks_per_grid,
threads_per_block,
args,
0,
streams[0] ) );
CUDA_RT_CALL( cudaEventRecord( events[0], streams[0] ) );
}
}
template<typename T>
void ComputeParticleTransitionCuda( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
int const & resampling_method,
T const * pin_sq_process_noise_cov,
T const * particle_state,
int const * resampling_index_up,
int const * resampling_index_down,
cudaEvent_t * events,
T * particle_state_new ) {
// Get d_sum_of_particle_weights address for reset
float *h_sum_of_particle_weights;
CUDA_RT_CALL( cudaGetSymbolAddress( ( void ** )&h_sum_of_particle_weights, d_sum_of_particle_weights ) );
unsigned long long int seed { static_cast<unsigned long long int>( clock( ) ) };
int const threads_per_block { kTPT };
int const blocks_per_grid { kBlocks * sm_count };
CUDA_RT_CALL( cudaMemcpyToSymbolAsync( c_process_noise_cov,
pin_sq_process_noise_cov,
kSysDim * kSysDim * sizeof( T ),
0,
cudaMemcpyHostToDevice,
streams[0] ) );
void *args[] { const_cast<int *>( &num_particles ),
&seed,
const_cast<int *>( &resampling_method ),
&resampling_index_up,
&resampling_index_down,
&particle_state,
&particle_state_new };
// Systematic and Stratified must wait on
// ComputeResampleIndexSysDownSharedPrefetch64
if ( resampling_method != static_cast<int>( utility::Method::kMetropolisC2 ) ) {
CUDA_RT_CALL( cudaStreamWaitEvent( streams[0], events[0], 0 ) );
} // Wait for ComputeResampleIndexSysDownSharedPrefetch64
else {
CUDA_RT_CALL( cudaStreamWaitEvent( streams[1], events[0], 0 ) );
} // Wait for ComputeResampleIndexMetropolisC2
// Reset d_sum_of_particle_weights before next time step
// If Metropolis, make sure it's not reset before ComputeEstimates is
// finished
CUDA_RT_CALL( cudaMemsetAsync( h_sum_of_particle_weights, 0, sizeof( T ), streams[1] ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &ComputeParticleTransition<T> ),
blocks_per_grid,
threads_per_block,
args,
0,
streams[0] ) );
}
// Explicit template instantiations needed to generate code
template void InitializeFilterCuda<float>( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
float const * pin_sq_initial_noise_cov,
cudaEvent_t * events,
float * particle_state_new );
template void ComputeMeasErrorsCuda<float>( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
float const * pin_inv_meas_noise_cov,
float const * pin_meas_update,
float const * particle_state_new,
cudaEvent_t * events,
float * particle_weights,
float * particle_state );
template void ComputeEstimatesCuda<float>( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
float const * particle_state,
cudaEvent_t * events,
float * filtered_estimates,
float * particle_weights );
template void ComputeResampleIndexCuda<float>( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
int const & time_step,
int const & resampling_method,
float const * particle_weights,
float * prefix_sum_particle_weights,
cudaEvent_t * events,
int * resampling_index_up,
int * resampling_index_down );
template void ComputeParticleTransitionCuda<float>( int const & sm_count,
cudaStream_t const *streams,
int const & num_particles,
int const & resampling_method,
float const * pin_sq_process_noise_cov,
float const * particle_state,
int const * resampling_index_up,
int const * resampling_index_down,
cudaEvent_t * events,
float * particle_state_new );
} /* namespace filters */
|
4118c0f6613d358b874acd36805546fa94e06d3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Mandelbrot.h"
#include "DomaineMath_GPU.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, uint t, DomaineMath domaineMath);
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, uint dt, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Mandelbrot_Cuda_RGBA_uchar4", domaineMath), variateurAnimation(Interval<int>(30, 100), dt)
{
// Tools
this->t = 0;
}
Mandelbrot::~Mandelbrot(void)
{
	// nothing to do
}
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used because the view is not zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
	// Device::lastCudaError("vague rgba uchar4 (before)"); // optional, for debug only, remove for release
hipLaunchKernelGGL(( mandelbrot), dim3(dg),dim3(db), 0, 0, ptrDevPixels, w, h, t, domaineMath);
	// Device::lastCudaError("vague rgba uchar4 (after)"); // optional, for debug only, remove for release
}
/**
* Override
 * Called periodically by the API
*/
void Mandelbrot::animationStep()
{
this->t = variateurAnimation.varierAndGet();
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 4118c0f6613d358b874acd36805546fa94e06d3e.cu | #include "Mandelbrot.h"
#include "DomaineMath_GPU.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, uint t, DomaineMath domaineMath);
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, uint dt, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Mandelbrot_Cuda_RGBA_uchar4", domaineMath), variateurAnimation(Interval<int>(30, 100), dt)
{
// Tools
this->t = 0;
}
Mandelbrot::~Mandelbrot(void)
{
	// nothing to do
}
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used because the view is not zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
	// Device::lastCudaError("vague rgba uchar4 (before)"); // optional, for debug only, remove for release
mandelbrot<<<dg,db>>>(ptrDevPixels, w, h, t, domaineMath);
	// Device::lastCudaError("vague rgba uchar4 (after)"); // optional, for debug only, remove for release
}
/**
* Override
 * Called periodically by the API
*/
void Mandelbrot::animationStep()
{
this->t = variateurAnimation.varierAndGet();
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
8b423a8d78a93c01f7c95c01b3980b05c2229f13.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/async/copy.h>
#include "cupoch/knn/kdtree_flann.h"
#include "cupoch/geometry/geometry_utils.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectByIndexImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
thrust::gather(utility::exec_policy(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(hipDeviceSynchronize());
}
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::device_vectorize<float, 3, ::floor>(ref_coord)
.cast<int>();
}
};
template <int Index, class... Args>
struct normalize_and_devide_tuple_functor
: public thrust::binary_function<const thrust::tuple<Args...>,
const int,
thrust::tuple<Args...>> {
__host__ __device__ thrust::tuple<Args...> operator()(
const thrust::tuple<Args...> &x, const int &y) const {
thrust::tuple<Args...> ans = x;
devide_tuple_impl(ans, y,
thrust::make_index_sequence<sizeof...(Args)>{});
thrust::get<Index>(ans).normalize();
return ans;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(float distance_threshold)
: distance_threshold_(distance_threshold){};
const float distance_threshold_;
__device__ bool operator()(thrust::tuple<int, float> x) const {
const float dist = thrust::get<1>(x);
return (dist > 0 && dist < distance_threshold_);
}
};
struct is_valid_index_functor {
__device__ int operator()(int idx) const {
return (int)(idx >= 0);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectByIndex(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(utility::exec_policy(0), sorted_indices.begin(),
sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectByIndexImpl(*this, *output, inv_indices);
} else {
SelectByIndexImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::SelectByMask(
const utility::device_vector<bool> &mask, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (points_.size() != mask.size()) {
utility::LogError("[SelectByMask] The point size should be equal to the mask size.\n");
return output;
}
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
if (has_normals) output->normals_.resize(mask.size());
if (has_colors) output->colors_.resize(mask.size());
output->points_.resize(mask.size());
auto fn = [invert] __device__ (bool flag) { return invert ? !flag : flag;};
if (has_normals && has_colors) {
auto begin = make_tuple_begin(output->points_, output->normals_, output->colors_);
auto end = thrust::copy_if(make_tuple_begin(points_, normals_, colors_),
make_tuple_end(points_, normals_, colors_),
mask.begin(), begin, fn);
resize_all(thrust::distance(begin, end), output->points_, output->normals_, output->colors_);
} else if (has_colors) {
auto begin = make_tuple_begin(output->points_, output->colors_);
auto end = thrust::copy_if(make_tuple_begin(points_, colors_),
make_tuple_end(points_, colors_),
mask.begin(), begin, fn);
resize_all(thrust::distance(begin, end), output->points_, output->colors_);
} else if (has_normals) {
auto begin = make_tuple_begin(output->points_, output->normals_);
auto end = thrust::copy_if(make_tuple_begin(points_, normals_),
make_tuple_end(points_, normals_),
mask.begin(), begin, fn);
resize_all(thrust::distance(begin, end), output->points_, output->normals_);
} else {
auto end = thrust::copy_if(points_.begin(), points_.end(),
mask.begin(), output->points_.begin(), fn);
output->points_.resize(thrust::distance(output->points_.begin(), end));
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
utility::device_vector<int> counts(n);
thrust::equal_to<Eigen::Vector3i> binary_pred;
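    // Sort points (and any per-point attributes) by voxel key, then reduce duplicates per voxel, accumulating attribute sums and a count for averaging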
auto runs = [&keys, &binary_pred] (auto&& out_begins, auto&... params) {
thrust::sort_by_key(utility::exec_policy(0), keys.begin(),
keys.end(),
make_tuple_begin(params...));
add_tuple_functor<typename std::remove_reference_t<decltype(params)>::value_type..., int> add_func;
auto end = thrust::reduce_by_key(
utility::exec_policy(0), keys.begin(), keys.end(),
make_tuple_iterator(std::begin(params)...,
thrust::make_constant_iterator(1)),
thrust::make_discard_iterator(), out_begins, binary_pred, add_func);
return thrust::distance(out_begins, end.second);
};
if (!has_normals && !has_colors) {
auto begin = make_tuple_begin(output->points_, counts);
int n_out = runs(begin, sorted_points);
devide_tuple_functor<Eigen::Vector3f> dv_func;
auto output_begins = make_tuple_begin(output->points_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
auto begin =
make_tuple_begin(output->points_, output->normals_, counts);
int n_out = runs(begin, sorted_points, sorted_normals);
normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f>
dv_func;
auto output_begins =
make_tuple_begin(output->points_, output->normals_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
resize_all(n_out, output->points_, output->normals_);
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
resize_all(n, output->colors_);
auto begin = make_tuple_begin(output->points_, output->colors_, counts);
int n_out = runs(begin, sorted_points, sorted_colors);
devide_tuple_functor<Eigen::Vector3f, Eigen::Vector3f> dv_func;
auto output_begins = make_tuple_begin(output->points_, output->colors_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
resize_all(n_out, output->points_, output->colors_);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
resize_all(n, output->normals_, output->colors_);
auto begin = make_tuple_begin(output->points_, output->normals_,
output->colors_, counts);
int n_out = runs(begin, sorted_points, sorted_normals, sorted_colors);
normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f,
Eigen::Vector3f>
dv_func;
auto output_begins = make_tuple_begin(output->points_, output->normals_,
output->colors_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
resize_all(n_out, output->points_, output->normals_, output->colors_);
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::system::cuda::unique_eager_event copy_e[3];
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
copy_e[0] = thrust::async::copy(utility::exec_policy(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
copy_e[1] = thrust::async::copy(utility::exec_policy(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
copy_e[2] = thrust::async::copy(utility::exec_policy(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
copy_e[0].wait();
if (has_normals) { copy_e[1].wait(); }
if (has_colors) { copy_e[2].wait(); }
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
knn::KDTreeFlann kdtree;
kdtree.SetRawData(ConvertVector3fVectorRef(*this));
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, nb_points + 1, tmp_indices,
dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> counts(n_pt);
utility::device_vector<size_t> indices(n_pt);
thrust::repeated_range<thrust::counting_iterator<size_t>> range(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt), nb_points + 1);
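    // Count the valid (non-negative) neighbor indices returned for each query point; a point is kept only if more than nb_points neighbors lie within the search radius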
thrust::reduce_by_key(
utility::exec_policy(0), range.begin(), range.end(),
thrust::make_transform_iterator(
tmp_indices.begin(),
is_valid_index_functor()),
thrust::make_discard_iterator(), counts.begin(),
thrust::equal_to<size_t>(), thrust::plus<size_t>());
auto begin = make_tuple_iterator(indices.begin(),
thrust::make_discard_iterator());
auto end = thrust::copy_if(
enumerate_begin(counts), enumerate_end(counts), begin,
[nb_points] __device__(const thrust::tuple<size_t, size_t> &x) {
return thrust::get<1>(x) > nb_points;
});
indices.resize(thrust::distance(begin, end));
return std::make_tuple(SelectByIndex(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
knn::KDTreeFlann kdtree;
kdtree.SetRawData(ConvertVector3fVectorRef(*this));
const size_t n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<size_t> counts(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
thrust::repeated_range<thrust::counting_iterator<size_t>> range(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt), nb_neighbors);
thrust::reduce_by_key(
utility::exec_policy(0), range.begin(), range.end(),
make_tuple_iterator(thrust::make_constant_iterator<size_t>(1),
dist.begin()),
thrust::make_discard_iterator(),
make_tuple_iterator(counts.begin(), avg_distances.begin()),
thrust::equal_to<size_t>(),
[] __device__(const thrust::tuple<size_t, float> &rhs,
const thrust::tuple<size_t, float> &lhs) {
float rd = thrust::get<1>(rhs);
size_t rc = thrust::get<0>(rhs);
if (isinf(rd) || rd < 0.0) {
rd = 0.0;
rc = 0;
}
float ld = thrust::get<1>(lhs);
size_t lc = thrust::get<0>(lhs);
if (isinf(ld) || ld < 0.0) {
ld = 0.0;
lc = 0;
}
return thrust::make_tuple(rc + lc, rd + ld);
});
thrust::transform(avg_distances.begin(), avg_distances.end(),
counts.begin(), avg_distances.begin(),
[] __device__(float avg, size_t cnt) {
return (cnt > 0) ? avg / (float)cnt : -1.0;
});
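    // Sum the valid average neighbor distances and count them; entries flagged as -1 (no valid neighbors) contribute zero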
auto mean_and_count = thrust::transform_reduce(
utility::exec_policy(0), avg_distances.begin(),
avg_distances.end(),
[] __device__(float const &x) {
return thrust::make_tuple(max(x, 0.0f), (size_t)(x >= 0.0));
},
thrust::make_tuple(0.0f, size_t(0)),
add_tuple_functor<float, size_t>());
const size_t valid_distances = thrust::get<1>(mean_and_count);
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
float cloud_mean = thrust::get<0>(mean_and_count);
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
utility::exec_policy(0), avg_distances.begin(),
avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
check_distance_threshold_functor th_func(distance_threshold);
auto begin = make_tuple_iterator(indices.begin(),
thrust::make_discard_iterator());
auto end = thrust::copy_if(enumerate_begin(avg_distances),
enumerate_end(avg_distances), begin, th_func);
indices.resize(thrust::distance(begin, end));
return std::make_tuple(SelectByIndex(indices), indices);
}
| 8b423a8d78a93c01f7c95c01b3980b05c2229f13.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/async/copy.h>
#include "cupoch/knn/kdtree_flann.h"
#include "cupoch/geometry/geometry_utils.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectByIndexImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
thrust::gather(utility::exec_policy(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(cudaDeviceSynchronize());
}
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::device_vectorize<float, 3, ::floor>(ref_coord)
.cast<int>();
}
};
template <int Index, class... Args>
struct normalize_and_devide_tuple_functor
: public thrust::binary_function<const thrust::tuple<Args...>,
const int,
thrust::tuple<Args...>> {
__host__ __device__ thrust::tuple<Args...> operator()(
const thrust::tuple<Args...> &x, const int &y) const {
thrust::tuple<Args...> ans = x;
devide_tuple_impl(ans, y,
thrust::make_index_sequence<sizeof...(Args)>{});
thrust::get<Index>(ans).normalize();
return ans;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(float distance_threshold)
: distance_threshold_(distance_threshold){};
const float distance_threshold_;
__device__ bool operator()(thrust::tuple<int, float> x) const {
const float dist = thrust::get<1>(x);
return (dist > 0 && dist < distance_threshold_);
}
};
struct is_valid_index_functor {
__device__ int operator()(int idx) const {
return (int)(idx >= 0);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectByIndex(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(utility::exec_policy(0), sorted_indices.begin(),
sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectByIndexImpl(*this, *output, inv_indices);
} else {
SelectByIndexImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::SelectByMask(
const utility::device_vector<bool> &mask, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (points_.size() != mask.size()) {
utility::LogError("[SelectByMask] The point size should be equal to the mask size.\n");
return output;
}
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
if (has_normals) output->normals_.resize(mask.size());
if (has_colors) output->colors_.resize(mask.size());
output->points_.resize(mask.size());
auto fn = [invert] __device__ (bool flag) { return invert ? !flag : flag;};
if (has_normals && has_colors) {
auto begin = make_tuple_begin(output->points_, output->normals_, output->colors_);
auto end = thrust::copy_if(make_tuple_begin(points_, normals_, colors_),
make_tuple_end(points_, normals_, colors_),
mask.begin(), begin, fn);
resize_all(thrust::distance(begin, end), output->points_, output->normals_, output->colors_);
} else if (has_colors) {
auto begin = make_tuple_begin(output->points_, output->colors_);
auto end = thrust::copy_if(make_tuple_begin(points_, colors_),
make_tuple_end(points_, colors_),
mask.begin(), begin, fn);
resize_all(thrust::distance(begin, end), output->points_, output->colors_);
} else if (has_normals) {
auto begin = make_tuple_begin(output->points_, output->normals_);
auto end = thrust::copy_if(make_tuple_begin(points_, normals_),
make_tuple_end(points_, normals_),
mask.begin(), begin, fn);
resize_all(thrust::distance(begin, end), output->points_, output->normals_);
} else {
auto end = thrust::copy_if(points_.begin(), points_.end(),
mask.begin(), output->points_.begin(), fn);
output->points_.resize(thrust::distance(output->points_.begin(), end));
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
utility::device_vector<int> counts(n);
thrust::equal_to<Eigen::Vector3i> binary_pred;
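    // Sort points (and any per-point attributes) by voxel key, then reduce duplicates per voxel, accumulating attribute sums and a count for averaging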
auto runs = [&keys, &binary_pred] (auto&& out_begins, auto&... params) {
thrust::sort_by_key(utility::exec_policy(0), keys.begin(),
keys.end(),
make_tuple_begin(params...));
add_tuple_functor<typename std::remove_reference_t<decltype(params)>::value_type..., int> add_func;
auto end = thrust::reduce_by_key(
utility::exec_policy(0), keys.begin(), keys.end(),
make_tuple_iterator(std::begin(params)...,
thrust::make_constant_iterator(1)),
thrust::make_discard_iterator(), out_begins, binary_pred, add_func);
return thrust::distance(out_begins, end.second);
};
if (!has_normals && !has_colors) {
auto begin = make_tuple_begin(output->points_, counts);
int n_out = runs(begin, sorted_points);
devide_tuple_functor<Eigen::Vector3f> dv_func;
auto output_begins = make_tuple_begin(output->points_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
auto begin =
make_tuple_begin(output->points_, output->normals_, counts);
int n_out = runs(begin, sorted_points, sorted_normals);
normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f>
dv_func;
auto output_begins =
make_tuple_begin(output->points_, output->normals_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
resize_all(n_out, output->points_, output->normals_);
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
resize_all(n, output->colors_);
auto begin = make_tuple_begin(output->points_, output->colors_, counts);
int n_out = runs(begin, sorted_points, sorted_colors);
devide_tuple_functor<Eigen::Vector3f, Eigen::Vector3f> dv_func;
auto output_begins = make_tuple_begin(output->points_, output->colors_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
resize_all(n_out, output->points_, output->colors_);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
resize_all(n, output->normals_, output->colors_);
auto begin = make_tuple_begin(output->points_, output->normals_,
output->colors_, counts);
int n_out = runs(begin, sorted_points, sorted_normals, sorted_colors);
normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f,
Eigen::Vector3f>
dv_func;
auto output_begins = make_tuple_begin(output->points_, output->normals_,
output->colors_);
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
resize_all(n_out, output->points_, output->normals_, output->colors_);
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::system::cuda::unique_eager_event copy_e[3];
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
copy_e[0] = thrust::async::copy(utility::exec_policy(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
copy_e[1] = thrust::async::copy(utility::exec_policy(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
copy_e[2] = thrust::async::copy(utility::exec_policy(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
copy_e[0].wait();
if (has_normals) { copy_e[1].wait(); }
if (has_colors) { copy_e[2].wait(); }
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
knn::KDTreeFlann kdtree;
kdtree.SetRawData(ConvertVector3fVectorRef(*this));
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, nb_points + 1, tmp_indices,
dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> counts(n_pt);
utility::device_vector<size_t> indices(n_pt);
thrust::repeated_range<thrust::counting_iterator<size_t>> range(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt), nb_points + 1);
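    // Count the valid (non-negative) neighbor indices returned for each query point; a point is kept only if more than nb_points neighbors lie within the search radius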
thrust::reduce_by_key(
utility::exec_policy(0), range.begin(), range.end(),
thrust::make_transform_iterator(
tmp_indices.begin(),
is_valid_index_functor()),
thrust::make_discard_iterator(), counts.begin(),
thrust::equal_to<size_t>(), thrust::plus<size_t>());
auto begin = make_tuple_iterator(indices.begin(),
thrust::make_discard_iterator());
auto end = thrust::copy_if(
enumerate_begin(counts), enumerate_end(counts), begin,
[nb_points] __device__(const thrust::tuple<size_t, size_t> &x) {
return thrust::get<1>(x) > nb_points;
});
indices.resize(thrust::distance(begin, end));
return std::make_tuple(SelectByIndex(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
knn::KDTreeFlann kdtree;
kdtree.SetRawData(ConvertVector3fVectorRef(*this));
const size_t n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<size_t> counts(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
thrust::repeated_range<thrust::counting_iterator<size_t>> range(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt), nb_neighbors);
thrust::reduce_by_key(
utility::exec_policy(0), range.begin(), range.end(),
make_tuple_iterator(thrust::make_constant_iterator<size_t>(1),
dist.begin()),
thrust::make_discard_iterator(),
make_tuple_iterator(counts.begin(), avg_distances.begin()),
thrust::equal_to<size_t>(),
[] __device__(const thrust::tuple<size_t, float> &rhs,
const thrust::tuple<size_t, float> &lhs) {
float rd = thrust::get<1>(rhs);
size_t rc = thrust::get<0>(rhs);
if (isinf(rd) || rd < 0.0) {
rd = 0.0;
rc = 0;
}
float ld = thrust::get<1>(lhs);
size_t lc = thrust::get<0>(lhs);
if (isinf(ld) || ld < 0.0) {
ld = 0.0;
lc = 0;
}
return thrust::make_tuple(rc + lc, rd + ld);
});
thrust::transform(avg_distances.begin(), avg_distances.end(),
counts.begin(), avg_distances.begin(),
[] __device__(float avg, size_t cnt) {
return (cnt > 0) ? avg / (float)cnt : -1.0;
});
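    // Sum the valid average neighbor distances and count them; entries flagged as -1 (no valid neighbors) contribute zero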
auto mean_and_count = thrust::transform_reduce(
utility::exec_policy(0), avg_distances.begin(),
avg_distances.end(),
[] __device__(float const &x) {
return thrust::make_tuple(max(x, 0.0f), (size_t)(x >= 0.0));
},
thrust::make_tuple(0.0f, size_t(0)),
add_tuple_functor<float, size_t>());
const size_t valid_distances = thrust::get<1>(mean_and_count);
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
float cloud_mean = thrust::get<0>(mean_and_count);
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
utility::exec_policy(0), avg_distances.begin(),
avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
check_distance_threshold_functor th_func(distance_threshold);
auto begin = make_tuple_iterator(indices.begin(),
thrust::make_discard_iterator());
auto end = thrust::copy_if(enumerate_begin(avg_distances),
enumerate_end(avg_distances), begin, th_func);
indices.resize(thrust::distance(begin, end));
return std::make_tuple(SelectByIndex(indices), indices);
}
|
c3f47e9d460902b8fec46b788056a72c512f7814.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <stdio.h>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include "ttm_cpu.h"
#include "ttm_gpu.h"
#include <bits/stdc++.h>
using namespace std;
int main(int argc, char* argv[]){
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
Options Opt = parse_cmd_options(argc, argv);
Tensor X;
load_tensor(X, Opt);
create_HCSR(X, Opt);
// check if appropriate file is loaded
string fileNameEndwith;
fileNameEndwith = to_string(X.modeOrder[0]) ;//+ to_string(X.modeOrder[1]) + to_string(X.modeOrder[2]);
std::size_t found = Opt.inFileName.find(fileNameEndwith);
if (found==std::string::npos){
cout << "Not the correct file for this mode" << endl;
exit(0);
}
Matrix *U = new Matrix[X.ndims];
create_mats(X, U, Opt, false);
randomize_mats(X, U, Opt);
zero_mat(X, U, Opt.mode);
    // allocate space for intermediate tensor Y (Y = X * Un)
semiSpTensor Y;
cout << "calling allocation " << endl;
prepare_Y(X, Y, Opt);
if(Opt.verbose)
cout << endl << "Starting TTM..." << endl;
// print tensors and statistics
if(Opt.impType == 0){
double t0 = seconds();
create_HCSR(X, Opt);
tensor_stats(X);
// print_HCSRtensor(X);
}
// CPU
if(Opt.impType == 1){
double t0 = seconds();
// ((X.ndims == 3) ? TTM_COO_CPU(X, U, Opt) : TTM_COO_CPU_4D(X, U, Opt));
TTM_CPU(X, Y, U, Opt);
printf("TTM - COO CPU time: %.3f sec \n", seconds() - t0);
}
// GPU
else if(Opt.impType == 2 || Opt.impType == 3){
TTM_GPU(X, Y, U, Opt);
}
// // HYB CPU
// else if(Opt.impType == 9){
// create_HCSR(X, Opt);
// HYBTensor HybX(X);
// cout << "Creating HYB... " ;
// double t0 = seconds();
// ((X.ndims == 3) ? create_HYB(HybX, X, Opt) : create_HYB_4D(HybX, X, Opt));
// printf("create HYB - time: %.3f sec \n", seconds() - t0);
// make_HybBin(HybX, Opt);
// // print_HYBtensor(HybX);
// // ((X.ndims == 3) ? MTTKRP_HYB_CPU(HybX, U, Opt) : MTTKRP_HYB_CPU_4D(HybX, U, Opt));
// MTTKRP_HYB_GPU(HybX, U, Opt);
// }
// // // HYB GPU
// // else if(Opt.impType == 10){
// // // MTTKRP_TILED_HCSR_GPU(TiledX, U, Opt);
// // }
// // Tiled versions
// else if(Opt.impType >= 5 && Opt.impType < 9){
// TiledTensor TiledX[Opt.nTile];
// create_HCSR(X, Opt);
// // print_HCSRtensor(X);
// int tilingMode = X.modeOrder[X.ndims -1];
// Opt.tileSize = (X.dims[tilingMode] + Opt.nTile - 1)/Opt.nTile;
// if(Opt.nTile > X.dims[tilingMode]){
// cout << "Number of tiles ("<< Opt.nTile << ") should be as minimum as K's dimension (" << X.dims[tilingMode] << "). Exiting."<< endl ;
// exit(0);
// }
// // split X into tiles based on K indices
// make_KTiling(X, TiledX, Opt);
// // create HCSR for each tile
// for (int tile = 0; tile < Opt.nTile; ++tile){
// create_TiledHCSR(TiledX, Opt, tile);
// // print_TiledHCSRtensor(TiledX, tile);
// }
// // Split tiles into bins according to nnz in slice
// for (int tile = 0; tile < Opt.nTile; ++tile){
// make_TiledBin(TiledX, Opt, tile);
// }
// // COO GPU
// if(Opt.impType == 5){
// double t0 = seconds();
// MTTKRP_TILED_COO_CPU(TiledX, U, Opt);
// printf("TILED COO CPU - time: %.3f sec \n", seconds() - t0);
// }
// // HCSR GPU
// else if(Opt.impType == 6){
// double t0 = seconds();
// ((X.ndims == 3) ? MTTKRP_TILED_HCSR_CPU(TiledX, U, Opt) : MTTKRP_TILED_HCSR_CPU_4D(TiledX, U, Opt));
// printf("TILED HCSR CPU - time: %.3f sec \n", seconds() - t0);
// }
// //COO GPU
// else if(Opt.impType == 7){
// cout << "GPU COO has bugs! " << endl;
// MTTKRP_TILED_COO_GPU(TiledX, U, Opt);
// }
// // HCSR GPU
// else if(Opt.impType == 8){
// MTTKRP_TILED_HCSR_GPU(TiledX, U, Opt);
// }
// }
// else // e.g. -1
// cout << "no MTTKRP" << endl;
if(!Opt.outFileName.empty()){
cout << "Writing Y to " << Opt.outFileName << endl;
write_output_ttmY(Y, X.modeOrder[0], Opt.outFileName);
}
/** Correctness check **/
if(Opt.correctness){
cout << "DO COO...now incorrect with fbr threshold " << endl;
cout << "correctness with CPU " << endl;
if (Opt.impType == 1) {
cout << "Already running COO seq on CPU!" << endl;
exit(0);
}
int mode = Opt.mode;
DTYPE *out = (DTYPE*)malloc(Y.nRows * Y.nCols * sizeof(DTYPE));
memcpy(out, Y.vals, Y.nRows * Y.nCols * sizeof(DTYPE));
memset(Y.vals, 0, Y.nRows * Y.nCols * sizeof(DTYPE));
// ((X.ndims == 3) ? TTM_CPU(X, U, Opt) : MTTKRP_COO_CPU_4D(X, U, Opt));
TTM_CPU(X, Y, U, Opt);
correctness_check(out, Y.vals, Y.nRows, Y.nCols);
}
free_all(X, Y, U);
}
| c3f47e9d460902b8fec46b788056a72c512f7814.cu | #include <fstream>
#include <stdio.h>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <cuda.h>
#include "ttm_cpu.h"
#include "ttm_gpu.h"
#include <bits/stdc++.h>
using namespace std;
int main(int argc, char* argv[]){
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
Options Opt = parse_cmd_options(argc, argv);
Tensor X;
load_tensor(X, Opt);
create_HCSR(X, Opt);
// check if appropriate file is loaded
string fileNameEndwith;
fileNameEndwith = to_string(X.modeOrder[0]) ;//+ to_string(X.modeOrder[1]) + to_string(X.modeOrder[2]);
std::size_t found = Opt.inFileName.find(fileNameEndwith);
if (found==std::string::npos){
cout << "Not the correct file for this mode" << endl;
exit(0);
}
Matrix *U = new Matrix[X.ndims];
create_mats(X, U, Opt, false);
randomize_mats(X, U, Opt);
zero_mat(X, U, Opt.mode);
//allocate space for intermediate tensor Y (Y = X * Un)
semiSpTensor Y;
cout << "calling allocation " << endl;
prepare_Y(X, Y, Opt);
if(Opt.verbose)
cout << endl << "Starting TTM..." << endl;
// print tensors and statistics
if(Opt.impType == 0){
double t0 = seconds();
create_HCSR(X, Opt);
tensor_stats(X);
// print_HCSRtensor(X);
}
// CPU
if(Opt.impType == 1){
double t0 = seconds();
// ((X.ndims == 3) ? TTM_COO_CPU(X, U, Opt) : TTM_COO_CPU_4D(X, U, Opt));
TTM_CPU(X, Y, U, Opt);
printf("TTM - COO CPU time: %.3f sec \n", seconds() - t0);
}
// GPU
else if(Opt.impType == 2 || Opt.impType == 3){
TTM_GPU(X, Y, U, Opt);
}
// // HYB CPU
// else if(Opt.impType == 9){
// create_HCSR(X, Opt);
// HYBTensor HybX(X);
// cout << "Creating HYB... " ;
// double t0 = seconds();
// ((X.ndims == 3) ? create_HYB(HybX, X, Opt) : create_HYB_4D(HybX, X, Opt));
// printf("create HYB - time: %.3f sec \n", seconds() - t0);
// make_HybBin(HybX, Opt);
// // print_HYBtensor(HybX);
// // ((X.ndims == 3) ? MTTKRP_HYB_CPU(HybX, U, Opt) : MTTKRP_HYB_CPU_4D(HybX, U, Opt));
// MTTKRP_HYB_GPU(HybX, U, Opt);
// }
// // // HYB GPU
// // else if(Opt.impType == 10){
// // // MTTKRP_TILED_HCSR_GPU(TiledX, U, Opt);
// // }
// // Tiled versions
// else if(Opt.impType >= 5 && Opt.impType < 9){
// TiledTensor TiledX[Opt.nTile];
// create_HCSR(X, Opt);
// // print_HCSRtensor(X);
// int tilingMode = X.modeOrder[X.ndims -1];
// Opt.tileSize = (X.dims[tilingMode] + Opt.nTile - 1)/Opt.nTile;
// if(Opt.nTile > X.dims[tilingMode]){
// cout << "Number of tiles ("<< Opt.nTile << ") should be as minimum as K's dimension (" << X.dims[tilingMode] << "). Exiting."<< endl ;
// exit(0);
// }
// // split X into tiles based on K indices
// make_KTiling(X, TiledX, Opt);
// // create HCSR for each tile
// for (int tile = 0; tile < Opt.nTile; ++tile){
// create_TiledHCSR(TiledX, Opt, tile);
// // print_TiledHCSRtensor(TiledX, tile);
// }
// // Split tiles into bins according to nnz in slice
// for (int tile = 0; tile < Opt.nTile; ++tile){
// make_TiledBin(TiledX, Opt, tile);
// }
// // COO GPU
// if(Opt.impType == 5){
// double t0 = seconds();
// MTTKRP_TILED_COO_CPU(TiledX, U, Opt);
// printf("TILED COO CPU - time: %.3f sec \n", seconds() - t0);
// }
// // HCSR GPU
// else if(Opt.impType == 6){
// double t0 = seconds();
// ((X.ndims == 3) ? MTTKRP_TILED_HCSR_CPU(TiledX, U, Opt) : MTTKRP_TILED_HCSR_CPU_4D(TiledX, U, Opt));
// printf("TILED HCSR CPU - time: %.3f sec \n", seconds() - t0);
// }
// //COO GPU
// else if(Opt.impType == 7){
// cout << "GPU COO has bugs! " << endl;
// MTTKRP_TILED_COO_GPU(TiledX, U, Opt);
// }
// // HCSR GPU
// else if(Opt.impType == 8){
// MTTKRP_TILED_HCSR_GPU(TiledX, U, Opt);
// }
// }
// else // e.g. -1
// cout << "no MTTKRP" << endl;
if(!Opt.outFileName.empty()){
cout << "Writing Y to " << Opt.outFileName << endl;
write_output_ttmY(Y, X.modeOrder[0], Opt.outFileName);
}
/** Correctness check **/
if(Opt.correctness){
cout << "DO COO...now incorrect with fbr threshold " << endl;
cout << "correctness with CPU " << endl;
if (Opt.impType == 1) {
cout << "Already running COO seq on CPU!" << endl;
exit(0);
}
int mode = Opt.mode;
DTYPE *out = (DTYPE*)malloc(Y.nRows * Y.nCols * sizeof(DTYPE));
memcpy(out, Y.vals, Y.nRows * Y.nCols * sizeof(DTYPE));
memset(Y.vals, 0, Y.nRows * Y.nCols * sizeof(DTYPE));
// ((X.ndims == 3) ? TTM_CPU(X, U, Opt) : MTTKRP_COO_CPU_4D(X, U, Opt));
TTM_CPU(X, Y, U, Opt);
correctness_check(out, Y.vals, Y.nRows, Y.nCols);
}
free_all(X, Y, U);
}
|
35e3f1abad8b4dbc5850409d90ad2615021e454d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <helper_timer.h>
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ...
// + lst[n-1]}
#define BLOCK_SIZE 512 //@@ You can change this
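// setInput adds the scanned total of all preceding blocks (aux) to every element
// of its block, so the independent per-block scans combine into one global
// inclusive scan. Block 0 has no preceding blocks and is left untouched.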
__global__ void setInput(int *inp, int *aux, int length) {
unsigned int tid = threadIdx.x;
unsigned int sIndex = 2 * blockIdx.x * BLOCK_SIZE;
if (blockIdx.x) {
if (sIndex + tid < length)
      inp[sIndex + tid] += aux[blockIdx.x - 1];
    if (sIndex + BLOCK_SIZE + tid < length)
      inp[sIndex + BLOCK_SIZE + tid] += aux[blockIdx.x - 1];
}
}
__global__ void scan(int *input, int *output, int *aux, int len) {
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
  __shared__ int sArray[BLOCK_SIZE << 1];  // int to match the input/output element type
unsigned int tid = threadIdx.x;
unsigned int sIndex = 2 * blockIdx.x * BLOCK_SIZE;
if (sIndex + tid < len)
sArray[tid] = input[sIndex + tid];
else
sArray[tid] = 0;
if (sIndex + BLOCK_SIZE + tid < len)
sArray[BLOCK_SIZE + tid] = input[sIndex + BLOCK_SIZE + tid];
else
sArray[BLOCK_SIZE + tid] = 0;
__syncthreads();
  // Reduction (up-sweep): build a tree of partial sums in shared memory
int str;
for (str = 1; str <= BLOCK_SIZE; str <<= 1) {
int index = (tid + 1) * str * 2 - 1;
if (index < 2 * BLOCK_SIZE)
sArray[index] += sArray[index - str];
__syncthreads();
}
  // Post reduction (down-sweep): add the partial sums back so each element holds its inclusive prefix sum
for (str = BLOCK_SIZE >> 1; str; str >>= 1) {
int index = (tid + 1) * str * 2 - 1;
if (index + str < 2 * BLOCK_SIZE)
sArray[index + str] += sArray[index];
__syncthreads();
}
if (sIndex + tid < len)
output[sIndex + tid] = sArray[tid];
if (sIndex + BLOCK_SIZE + tid < len)
output[sIndex + BLOCK_SIZE + tid] = sArray[BLOCK_SIZE + tid];
if (aux && tid == 0)
aux[blockIdx.x] = sArray[2 * BLOCK_SIZE - 1];
}
int main(int argc, char **argv) {
int *hostInput; // The input 1D list
int *hostOutput; // The output list
int *expectedOutput;
int *deviceInput;
int *deviceOutput;
int *deviceAuxArray, *deviceAuxScannedArray;
int numElements; // number of elements in the list
FILE *infile, *outfile;
int inputLength, outputLength;
StopWatchLinux stw;
unsigned int blog = 1;
// Import host input data
stw.start();
if ((infile = fopen("input.raw", "r")) == NULL)
{ printf("Cannot open input.raw.\n"); exit(EXIT_FAILURE); }
fscanf(infile, "%i", &inputLength);
hostInput = (int *)malloc(sizeof(int) * inputLength);
for (int i = 0; i < inputLength; i++)
fscanf(infile, "%i", &hostInput[i]);
fclose(infile);
numElements = inputLength;
hostOutput = (int *)malloc(numElements * sizeof(int));
stw.stop();
printf("Importing data and creating memory on host: %f ms\n", stw.getTime());
if (blog) printf("*** The number of input elements in the input is %i\n", numElements);
stw.reset();
stw.start();
hipMalloc((void **)&deviceInput, numElements * sizeof(int));
hipMalloc((void **)&deviceOutput, numElements * sizeof(int));
hipMalloc(&deviceAuxArray, (BLOCK_SIZE << 1) * sizeof(int));
hipMalloc(&deviceAuxScannedArray, (BLOCK_SIZE << 1) * sizeof(int));
stw.stop();
printf("Allocating GPU memory: %f ms\n", stw.getTime());
stw.reset();
stw.start();
hipMemset(deviceOutput, 0, numElements * sizeof(int));
stw.stop();
printf("Clearing output memory: %f ms\n", stw.getTime());
stw.reset();
stw.start();
hipMemcpy(deviceInput, hostInput, numElements * sizeof(int),
hipMemcpyHostToDevice);
stw.stop();
printf("Copying input memory to the GPU: %f ms\n", stw.getTime());
//@@ Initialize the grid and block dimensions here
int gridSize = ceil((float)numElements/(BLOCK_SIZE<<1));
dim3 dimGrid(gridSize, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
stw.reset();
stw.start();
//@@ Modify this to complete the functionality of the scan
//@@ on the device
hipLaunchKernelGGL(( scan), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInput, deviceOutput, deviceAuxArray, numElements);
hipLaunchKernelGGL(( scan), dim3(dim3(1,1,1)), dim3(dimBlock), 0, 0, deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE << 1);
hipLaunchKernelGGL(( setInput), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceOutput, deviceAuxScannedArray, numElements);
hipDeviceSynchronize();
stw.stop();
printf("Performing CUDA computation: %f ms\n", stw.getTime());
stw.reset();
stw.start();
hipMemcpy(hostOutput, deviceOutput, numElements * sizeof(int),
hipMemcpyDeviceToHost);
stw.stop();
printf("Copying output memory to the CPU: %f ms\n", stw.getTime());
stw.reset();
stw.start();
hipFree(deviceInput);
hipFree(deviceOutput);
hipFree(deviceAuxArray);
hipFree(deviceAuxScannedArray);
stw.stop();
printf("Freeing GPU Memory: %f ms\n", stw.getTime());
if ((outfile = fopen("output.raw", "r")) == NULL)
{ printf("Cannot open output.raw.\n"); exit(EXIT_FAILURE); }
fscanf(outfile, "%i", &outputLength);
expectedOutput = (int *)malloc(sizeof(int) * outputLength);
for (int i = 0; i < outputLength; i++)
fscanf(outfile, "%i", &expectedOutput[i]);
fclose(outfile);
int test = 1;
for (int i=0;i<outputLength;i++)
{
printf("%i\n",hostOutput[i]);
}
for (int i = 0; i < outputLength; i++) {
if (expectedOutput[i] != hostOutput[i])
printf("%i %i %i\n", i, expectedOutput[i], hostOutput[i]);
test = test && (expectedOutput[i] == hostOutput[i]);
}
if (test) printf("Results correct.\n");
else printf("Results incorrect.\n");
free(hostInput);
  free(hostOutput);  // hostOutput was allocated with malloc
free(expectedOutput);
return 0;
}
| 35e3f1abad8b4dbc5850409d90ad2615021e454d.cu | #include <stdio.h>
#include <stdlib.h>
#include <helper_timer.h>
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ...
// + lst[n-1]}
#define BLOCK_SIZE 512 //@@ You can change this
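// setInput adds the scanned total of all preceding blocks (aux) to every element
// of its block, so the independent per-block scans combine into one global
// inclusive scan. Block 0 has no preceding blocks and is left untouched.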
__global__ void setInput(int *inp, int *aux, int length) {
unsigned int tid = threadIdx.x;
unsigned int sIndex = 2 * blockIdx.x * BLOCK_SIZE;
if (blockIdx.x) {
if (sIndex + tid < length)
      inp[sIndex + tid] += aux[blockIdx.x - 1];
    if (sIndex+ BLOCK_SIZE + tid < length)
      inp[sIndex + BLOCK_SIZE + tid] += aux[blockIdx.x - 1];
}
}
__global__ void scan(int *input, int *output, int *aux, int len) {
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
  __shared__ int sArray[BLOCK_SIZE << 1];  // int to match the input/output element type
unsigned int tid = threadIdx.x;
unsigned int sIndex = 2 * blockIdx.x * BLOCK_SIZE;
if (sIndex + tid < len)
sArray[tid] = input[sIndex + tid];
else
sArray[tid] = 0;
if (sIndex + BLOCK_SIZE + tid < len)
sArray[BLOCK_SIZE + tid] = input[sIndex + BLOCK_SIZE + tid];
else
sArray[BLOCK_SIZE + tid] = 0;
__syncthreads();
  // Reduction (up-sweep): build a tree of partial sums in shared memory
int str;
for (str = 1; str <= BLOCK_SIZE; str <<= 1) {
int index = (tid + 1) * str * 2 - 1;
if (index < 2 * BLOCK_SIZE)
sArray[index] += sArray[index - str];
__syncthreads();
}
  // Post reduction (down-sweep): add the partial sums back so each element holds its inclusive prefix sum
for (str = BLOCK_SIZE >> 1; str; str >>= 1) {
int index = (tid + 1) * str * 2 - 1;
if (index + str < 2 * BLOCK_SIZE)
sArray[index + str] += sArray[index];
__syncthreads();
}
if (sIndex + tid < len)
output[sIndex + tid] = sArray[tid];
if (sIndex + BLOCK_SIZE + tid < len)
output[sIndex + BLOCK_SIZE + tid] = sArray[BLOCK_SIZE + tid];
if (aux && tid == 0)
aux[blockIdx.x] = sArray[2 * BLOCK_SIZE - 1];
}
int main(int argc, char **argv) {
int *hostInput; // The input 1D list
int *hostOutput; // The output list
int *expectedOutput;
int *deviceInput;
int *deviceOutput;
int *deviceAuxArray, *deviceAuxScannedArray;
int numElements; // number of elements in the list
FILE *infile, *outfile;
int inputLength, outputLength;
StopWatchLinux stw;
unsigned int blog = 1;
// Import host input data
stw.start();
if ((infile = fopen("input.raw", "r")) == NULL)
{ printf("Cannot open input.raw.\n"); exit(EXIT_FAILURE); }
fscanf(infile, "%i", &inputLength);
hostInput = (int *)malloc(sizeof(int) * inputLength);
for (int i = 0; i < inputLength; i++)
fscanf(infile, "%i", &hostInput[i]);
fclose(infile);
numElements = inputLength;
hostOutput = (int *)malloc(numElements * sizeof(int));
stw.stop();
printf("Importing data and creating memory on host: %f ms\n", stw.getTime());
if (blog) printf("*** The number of input elements in the input is %i\n", numElements);
stw.reset();
stw.start();
cudaMalloc((void **)&deviceInput, numElements * sizeof(int));
cudaMalloc((void **)&deviceOutput, numElements * sizeof(int));
cudaMalloc(&deviceAuxArray, (BLOCK_SIZE << 1) * sizeof(int));
cudaMalloc(&deviceAuxScannedArray, (BLOCK_SIZE << 1) * sizeof(int));
stw.stop();
printf("Allocating GPU memory: %f ms\n", stw.getTime());
stw.reset();
stw.start();
cudaMemset(deviceOutput, 0, numElements * sizeof(int));
stw.stop();
printf("Clearing output memory: %f ms\n", stw.getTime());
stw.reset();
stw.start();
cudaMemcpy(deviceInput, hostInput, numElements * sizeof(int),
cudaMemcpyHostToDevice);
stw.stop();
printf("Copying input memory to the GPU: %f ms\n", stw.getTime());
//@@ Initialize the grid and block dimensions here
int gridSize = ceil((float)numElements/(BLOCK_SIZE<<1));
dim3 dimGrid(gridSize, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
stw.reset();
stw.start();
//@@ Modify this to complete the functionality of the scan
//@@ on the device
scan<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, deviceAuxArray, numElements);
scan<<<dim3(1,1,1), dimBlock>>>(deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE << 1);
setInput<<<dimGrid, dimBlock>>>(deviceOutput, deviceAuxScannedArray, numElements);
cudaDeviceSynchronize();
stw.stop();
printf("Performing CUDA computation: %f ms\n", stw.getTime());
stw.reset();
stw.start();
cudaMemcpy(hostOutput, deviceOutput, numElements * sizeof(int),
cudaMemcpyDeviceToHost);
stw.stop();
printf("Copying output memory to the CPU: %f ms\n", stw.getTime());
stw.reset();
stw.start();
cudaFree(deviceInput);
cudaFree(deviceOutput);
cudaFree(deviceAuxArray);
cudaFree(deviceAuxScannedArray);
stw.stop();
printf("Freeing GPU Memory: %f ms\n", stw.getTime());
if ((outfile = fopen("output.raw", "r")) == NULL)
{ printf("Cannot open output.raw.\n"); exit(EXIT_FAILURE); }
fscanf(outfile, "%i", &outputLength);
expectedOutput = (int *)malloc(sizeof(int) * outputLength);
for (int i = 0; i < outputLength; i++)
fscanf(outfile, "%i", &expectedOutput[i]);
fclose(outfile);
int test = 1;
for (int i=0;i<outputLength;i++)
{
printf("%i\n",hostOutput[i]);
}
for (int i = 0; i < outputLength; i++) {
if (expectedOutput[i] != hostOutput[i])
printf("%i %i %i\n", i, expectedOutput[i], hostOutput[i]);
test = test && (expectedOutput[i] == hostOutput[i]);
}
if (test) printf("Results correct.\n");
else printf("Results incorrect.\n");
free(hostInput);
  free(hostOutput);  // hostOutput was allocated with malloc
free(expectedOutput);
return 0;
}
|
1714464f4a701f8afb6897e31dd96eee3bc0b570.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdint.h>
#include <algorithm>
#include "kernel/gpu/cuda_impl/slice_impl.cuh"
template <typename T>
__global__ void Slice(const T* input, int p, int start, int length, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (length); pos += blockDim.x * gridDim.x) {
output[p + pos] = input[start + pos];
}
return;
}
template <typename T>
__global__ void SliceGrad(const T* dy, int p, int start, int length, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (length); pos += blockDim.x * gridDim.x) {
output[start + pos] = dy[p + pos];
}
return;
}
template <typename T>
__global__ void StridedSlice(const T* input, int p, int start, int begin, int stride, int ended, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < ((ended - 1 - begin) / stride) + 1;
pos += blockDim.x * gridDim.x) {
output[p + pos] = input[start + pos * stride];
}
return;
}
template <typename T>
__global__ void StridedSliceGrad(const T* dy, int p, int start, int begin, int stride, int ended, T* dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < ((ended - 1 - begin) / stride) + 1;
pos += blockDim.x * gridDim.x) {
dx[start + pos * stride] = dy[p + pos];
}
return;
}
template <typename T>
__global__ void FillArray(T* addr, const size_t len, const float value) {
T value_ = static_cast<T>(value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < len; pos += blockDim.x * gridDim.x) {
addr[pos] = value_;
}
return;
}
template <typename T>
void FillDeviceArray(const size_t input_size, T* addr, const float value, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( FillArray), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, addr, input_size, value);
return;
}
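// CalSlice flattens the 4-D slice into kernel launches: the three outer loops walk
// the begin/size window over dims 0..2, and each launch copies one contiguous run
// of size[3] elements of the innermost dimension into the packed output at offset p.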
template <typename T>
void CalSlice(const size_t input_size, const T* input, const std::vector<int> in_shape, const std::vector<int> begin,
const std::vector<int> size, T* output, hipStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int length = size[3];
int p = 0;
for (int i = begin[0]; i < size[0] + begin[0]; i++) {
for (int j = begin[1]; j < size[1] + begin[1]; j++) {
for (int k = begin[2]; k < size[2] + begin[2]; k++) {
hipLaunchKernelGGL(( Slice), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, input, p, i * block + j * map + k * w + begin[3],
length, output);
p = p + size[3];
}
}
}
}
template <typename T>
void CalSliceGrad(const size_t input_size, const T* dy, const std::vector<int> in_shape, const std::vector<int> begin,
const std::vector<int> size, T* output, hipStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int length = size[3];
int p = 0;
for (int i = begin[0]; i < size[0] + begin[0]; i++) {
for (int j = begin[1]; j < size[1] + begin[1]; j++) {
for (int k = begin[2]; k < size[2] + begin[2]; k++) {
hipLaunchKernelGGL(( SliceGrad), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream,
dy, p, i * block + j * map + k * w + begin[3], length, output);
p = p + size[3];
}
}
}
}
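// CalStridedSlice walks dims 0..2 with the requested strides (a negative stride is
// handled by mirroring the index around begin[]), then launches one kernel per run
// to gather the strided elements of the innermost dimension.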
template <typename T>
void CalStridedSlice(const size_t input_size, const T* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end, const std::vector<int> strides,
T* output, hipStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int ended = end[3];
int p = 0;
int start = 0;
for (int i = begin[0]; i < ((end[0] > begin[0]) ? end[0] : (2 * begin[0] - end[0])); i += std::abs(strides[0])) {
for (int j = begin[1]; j < ((end[1] > begin[1]) ? end[1] : (2 * begin[1] - end[1])); j += std::abs(strides[1])) {
for (int k = begin[2]; k < ((end[2] > begin[2]) ? end[2] : (2 * begin[2] - end[2])); k += std::abs(strides[2])) {
start = (strides[0] > 0 ? i : 2 * begin[0] - i) * block + (strides[1] > 0 ? j : 2 * begin[1] - j) * map +
(strides[2] > 0 ? k : 2 * begin[2] - k) * w + begin[3];
hipLaunchKernelGGL(( StridedSlice), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, input, p, start, begin[3], strides[3],
ended, output);
p = p + (end[3] - 1 - begin[3]) / strides[3] + 1;
}
}
}
}
template <typename T>
void CalStridedSliceGrad(const size_t input_size, const T* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end, const std::vector<int> strides,
T* dx, hipStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int ended = end[3];
int p = 0;
int start = 0;
for (int i = begin[0]; i < ((end[0] > begin[0]) ? end[0] : (2 * begin[0] - end[0] + 1)); i += std::abs(strides[0])) {
for (int j = begin[1]; j < ((end[1] > begin[1]) ? end[1] : (2 * begin[1] - end[1] + 1));
j += std::abs(strides[1])) {
for (int k = begin[2]; k < ((end[2] > begin[2]) ? end[2] : (2 * begin[2] - end[2] + 1));
k += std::abs(strides[2])) {
start = (strides[0] > 0 ? i : 2 * begin[0] - i) * block + (strides[1] > 0 ? j : 2 * begin[1] - j) * map +
(strides[2] > 0 ? k : 2 * begin[2] - k) * w + begin[3];
hipLaunchKernelGGL(( StridedSliceGrad), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, dy, p, start, begin[3], strides[3],
ended, dx);
p = p + (end[3] - 1 - begin[3]) / strides[3] + 1;
}
}
}
}
template void FillDeviceArray<float>(const size_t input_size, float* addr, const float value, hipStream_t cuda_stream);
template void CalSlice<float>(const size_t input_size, const float* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, float* output,
hipStream_t cuda_stream);
template void CalSliceGrad<float>(const size_t input_size, const float* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, float* output,
hipStream_t cuda_stream);
template void CalStridedSlice<float>(const size_t input_size, const float* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, float* output, hipStream_t cuda_stream);
template void CalStridedSliceGrad<float>(const size_t input_size, const float* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, float* dx, hipStream_t cuda_stream);
template void FillDeviceArray<half>(const size_t input_size, half* addr, const float value, hipStream_t cuda_stream);
template void CalSlice<half>(const size_t input_size, const half* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, half* output,
hipStream_t cuda_stream);
template void CalSliceGrad<half>(const size_t input_size, const half* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, half* output,
hipStream_t cuda_stream);
template void CalStridedSlice<half>(const size_t input_size, const half* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, half* output, hipStream_t cuda_stream);
template void CalStridedSliceGrad<half>(const size_t input_size, const half* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, half* dx, hipStream_t cuda_stream);
template void FillDeviceArray<int>(const size_t input_size, int* addr, const float value, hipStream_t cuda_stream);
template void CalSlice<int>(const size_t input_size, const int* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, int* output,
hipStream_t cuda_stream);
template void CalSliceGrad<int>(const size_t input_size, const int* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, int* output,
hipStream_t cuda_stream);
template void CalStridedSlice<int>(const size_t input_size, const int* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, int* output, hipStream_t cuda_stream);
template void CalStridedSliceGrad<int>(const size_t input_size, const int* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, int* dx, hipStream_t cuda_stream);
| 1714464f4a701f8afb6897e31dd96eee3bc0b570.cu | /**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdint.h>
#include <algorithm>
#include "kernel/gpu/cuda_impl/slice_impl.cuh"
template <typename T>
__global__ void Slice(const T* input, int p, int start, int length, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (length); pos += blockDim.x * gridDim.x) {
output[p + pos] = input[start + pos];
}
return;
}
template <typename T>
__global__ void SliceGrad(const T* dy, int p, int start, int length, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (length); pos += blockDim.x * gridDim.x) {
output[start + pos] = dy[p + pos];
}
return;
}
template <typename T>
__global__ void StridedSlice(const T* input, int p, int start, int begin, int stride, int ended, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < ((ended - 1 - begin) / stride) + 1;
pos += blockDim.x * gridDim.x) {
output[p + pos] = input[start + pos * stride];
}
return;
}
template <typename T>
__global__ void StridedSliceGrad(const T* dy, int p, int start, int begin, int stride, int ended, T* dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < ((ended - 1 - begin) / stride) + 1;
pos += blockDim.x * gridDim.x) {
dx[start + pos * stride] = dy[p + pos];
}
return;
}
template <typename T>
__global__ void FillArray(T* addr, const size_t len, const float value) {
T value_ = static_cast<T>(value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < len; pos += blockDim.x * gridDim.x) {
addr[pos] = value_;
}
return;
}
template <typename T>
void FillDeviceArray(const size_t input_size, T* addr, const float value, cudaStream_t cuda_stream) {
FillArray<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(addr, input_size, value);
return;
}
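// CalSlice flattens the 4-D slice into kernel launches: the three outer loops walk
// the begin/size window over dims 0..2, and each launch copies one contiguous run
// of size[3] elements of the innermost dimension into the packed output at offset p.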
template <typename T>
void CalSlice(const size_t input_size, const T* input, const std::vector<int> in_shape, const std::vector<int> begin,
const std::vector<int> size, T* output, cudaStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int length = size[3];
int p = 0;
for (int i = begin[0]; i < size[0] + begin[0]; i++) {
for (int j = begin[1]; j < size[1] + begin[1]; j++) {
for (int k = begin[2]; k < size[2] + begin[2]; k++) {
Slice<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(input, p, i * block + j * map + k * w + begin[3],
length, output);
p = p + size[3];
}
}
}
}
template <typename T>
void CalSliceGrad(const size_t input_size, const T* dy, const std::vector<int> in_shape, const std::vector<int> begin,
const std::vector<int> size, T* output, cudaStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int length = size[3];
int p = 0;
for (int i = begin[0]; i < size[0] + begin[0]; i++) {
for (int j = begin[1]; j < size[1] + begin[1]; j++) {
for (int k = begin[2]; k < size[2] + begin[2]; k++) {
SliceGrad<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(
dy, p, i * block + j * map + k * w + begin[3], length, output);
p = p + size[3];
}
}
}
}
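// CalStridedSlice walks dims 0..2 with the requested strides (a negative stride is
// handled by mirroring the index around begin[]), then launches one kernel per run
// to gather the strided elements of the innermost dimension.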
template <typename T>
void CalStridedSlice(const size_t input_size, const T* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end, const std::vector<int> strides,
T* output, cudaStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int ended = end[3];
int p = 0;
int start = 0;
for (int i = begin[0]; i < ((end[0] > begin[0]) ? end[0] : (2 * begin[0] - end[0])); i += std::abs(strides[0])) {
for (int j = begin[1]; j < ((end[1] > begin[1]) ? end[1] : (2 * begin[1] - end[1])); j += std::abs(strides[1])) {
for (int k = begin[2]; k < ((end[2] > begin[2]) ? end[2] : (2 * begin[2] - end[2])); k += std::abs(strides[2])) {
start = (strides[0] > 0 ? i : 2 * begin[0] - i) * block + (strides[1] > 0 ? j : 2 * begin[1] - j) * map +
(strides[2] > 0 ? k : 2 * begin[2] - k) * w + begin[3];
StridedSlice<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(input, p, start, begin[3], strides[3],
ended, output);
p = p + (end[3] - 1 - begin[3]) / strides[3] + 1;
}
}
}
}
template <typename T>
void CalStridedSliceGrad(const size_t input_size, const T* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end, const std::vector<int> strides,
T* dx, cudaStream_t cuda_stream) {
int block = in_shape[1] * in_shape[2] * in_shape[3];
int map = in_shape[2] * in_shape[3];
int w = in_shape[3];
int ended = end[3];
int p = 0;
int start = 0;
for (int i = begin[0]; i < ((end[0] > begin[0]) ? end[0] : (2 * begin[0] - end[0] + 1)); i += std::abs(strides[0])) {
for (int j = begin[1]; j < ((end[1] > begin[1]) ? end[1] : (2 * begin[1] - end[1] + 1));
j += std::abs(strides[1])) {
for (int k = begin[2]; k < ((end[2] > begin[2]) ? end[2] : (2 * begin[2] - end[2] + 1));
k += std::abs(strides[2])) {
start = (strides[0] > 0 ? i : 2 * begin[0] - i) * block + (strides[1] > 0 ? j : 2 * begin[1] - j) * map +
(strides[2] > 0 ? k : 2 * begin[2] - k) * w + begin[3];
StridedSliceGrad<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(dy, p, start, begin[3], strides[3],
ended, dx);
p = p + (end[3] - 1 - begin[3]) / strides[3] + 1;
}
}
}
}
template void FillDeviceArray<float>(const size_t input_size, float* addr, const float value, cudaStream_t cuda_stream);
template void CalSlice<float>(const size_t input_size, const float* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, float* output,
cudaStream_t cuda_stream);
template void CalSliceGrad<float>(const size_t input_size, const float* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, float* output,
cudaStream_t cuda_stream);
template void CalStridedSlice<float>(const size_t input_size, const float* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, float* output, cudaStream_t cuda_stream);
template void CalStridedSliceGrad<float>(const size_t input_size, const float* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, float* dx, cudaStream_t cuda_stream);
template void FillDeviceArray<half>(const size_t input_size, half* addr, const float value, cudaStream_t cuda_stream);
template void CalSlice<half>(const size_t input_size, const half* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, half* output,
cudaStream_t cuda_stream);
template void CalSliceGrad<half>(const size_t input_size, const half* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, half* output,
cudaStream_t cuda_stream);
template void CalStridedSlice<half>(const size_t input_size, const half* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, half* output, cudaStream_t cuda_stream);
template void CalStridedSliceGrad<half>(const size_t input_size, const half* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, half* dx, cudaStream_t cuda_stream);
template void FillDeviceArray<int>(const size_t input_size, int* addr, const float value, cudaStream_t cuda_stream);
template void CalSlice<int>(const size_t input_size, const int* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, int* output,
cudaStream_t cuda_stream);
template void CalSliceGrad<int>(const size_t input_size, const int* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> size, int* output,
cudaStream_t cuda_stream);
template void CalStridedSlice<int>(const size_t input_size, const int* input, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, int* output, cudaStream_t cuda_stream);
template void CalStridedSliceGrad<int>(const size_t input_size, const int* dy, const std::vector<int> in_shape,
const std::vector<int> begin, const std::vector<int> end,
const std::vector<int> strides, int* dx, cudaStream_t cuda_stream);
|
ef6094c0c63e5a1839af2567fe0d7475ba836c59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlacpy_cnjg.cu normal z -> s, Sat Nov 15 19:53:57 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************
*
* SWAP BLAS: permute to set of N elements
*
********************************************************/
/*
* First version: line per line
*/
typedef struct {
float *A1;
float *A2;
int n, lda1, lda2;
} magmagpu_slacpy_cnjg_params_t;
__global__ void magmagpu_slacpy_cnjg( magmagpu_slacpy_cnjg_params_t params )
{
unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = x*params.lda1;
unsigned int offset2 = x*params.lda2;
if( x < params.n )
{
float *A1 = params.A1 + offset1;
float *A2 = params.A2 + offset2;
*A2 = MAGMA_S_CNJG(*A1);
}
}
extern "C" void
magmablas_slacpy_cnjg_q(
magma_int_t n, float *dA1, magma_int_t lda1,
float *dA2, magma_int_t lda2,
magma_queue_t queue )
{
int blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magmagpu_slacpy_cnjg_params_t params = { dA1, dA2, n, lda1, lda2 };
hipLaunchKernelGGL(( magmagpu_slacpy_cnjg), dim3(blocks), dim3(blocksize), 0, queue , params );
}
extern "C" void
magmablas_slacpy_cnjg(
magma_int_t n, float *dA1, magma_int_t lda1,
float *dA2, magma_int_t lda2)
{
magmablas_slacpy_cnjg_q( n, dA1, lda1, dA2, lda2, magma_stream );
}
| ef6094c0c63e5a1839af2567fe0d7475ba836c59.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlacpy_cnjg.cu normal z -> s, Sat Nov 15 19:53:57 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************
*
* SWAP BLAS: permute to set of N elements
*
********************************************************/
/*
* First version: line per line
*/
typedef struct {
float *A1;
float *A2;
int n, lda1, lda2;
} magmagpu_slacpy_cnjg_params_t;
__global__ void magmagpu_slacpy_cnjg( magmagpu_slacpy_cnjg_params_t params )
{
unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = x*params.lda1;
unsigned int offset2 = x*params.lda2;
if( x < params.n )
{
float *A1 = params.A1 + offset1;
float *A2 = params.A2 + offset2;
*A2 = MAGMA_S_CNJG(*A1);
}
}
extern "C" void
magmablas_slacpy_cnjg_q(
magma_int_t n, float *dA1, magma_int_t lda1,
float *dA2, magma_int_t lda2,
magma_queue_t queue )
{
int blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magmagpu_slacpy_cnjg_params_t params = { dA1, dA2, n, lda1, lda2 };
magmagpu_slacpy_cnjg<<< blocks, blocksize, 0, queue >>>( params );
}
extern "C" void
magmablas_slacpy_cnjg(
magma_int_t n, float *dA1, magma_int_t lda1,
float *dA2, magma_int_t lda2)
{
magmablas_slacpy_cnjg_q( n, dA1, lda1, dA2, lda2, magma_stream );
}
|
87500b64a3f8eaa5634e64c61801817f52eb8f14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA Mesh Node.
// -------------------------------------------------------------------
// Copyright (C) 2010 OpenEngine.dk (See AUTHORS)
//
// This program is free software; It is covered by the GNU General
// Public License version 2 or any later version.
// See the GNU General Public License for more details (see LICENSE).
//--------------------------------------------------------------------
#include <Scene/CUDAMeshNode.h>
#include <Geometry/GeometrySet.h>
#include <Geometry/Mesh.h>
#include <Resources/IDataBlock.h>
#include <Scene/MeshNode.h>
#include <Utils/CUDA/Convert.h>
#include <Utils/CUDA/Utils.h>
namespace OpenEngine {
using namespace Resources;
using namespace Resources::CUDA;
namespace Scene {
__global__ void CopyVertices(float3* vertIn,
float4* vertOut,
const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
vertOut[id] = make_float4(vertIn[id], 1.0);
}
}
__global__ void CopyNormals(float3* normIn,
float4* normOut,
const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
normOut[id] = make_float4(normIn[id], 0.0);
}
}
__global__ void CopyColors(float3* colorIn, uchar4 *colorOut, const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
float3 c = colorIn[id];
colorOut[id] = make_uchar4(c.x * 255, c.y * 255, c.z * 255, 255);
}
}
__global__ void CopyColors(float4* colorIn, uchar4 *colorOut, const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
float4 c = colorIn[id];
colorOut[id] = make_uchar4(c.x * 255, c.y * 255, c.z * 255, c.w * 255);
}
}
__global__ void SetColor(const uchar4 color, uchar4* colorOut, const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
colorOut[id] = color;
}
}
CUDAMeshNode::CUDAMeshNode(MeshNode* mesh){
IDataBlockPtr v = mesh->GetMesh()->GetGeometrySet()->GetVertices();
IDataBlockPtr n = mesh->GetMesh()->GetGeometrySet()->GetNormals();
IDataBlockPtr c = mesh->GetMesh()->GetGeometrySet()->GetColors();
IndicesPtr i = mesh->GetMesh()->GetIndices();
unsigned int size = v->GetSize();
unsigned int blocks, threads;
Calc1DKernelDimensions(size, blocks, threads, 128);
// @TODO change to use mapped memory
float3 *hat;
hipMalloc(&hat, size * sizeof(float3));
vertices = new CUDADataBlock<float4>(size);
if (v->GetDimension() == 3){
hipMemcpy(hat, v->GetVoidDataPtr(), size * sizeof(float3), hipMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
hipLaunchKernelGGL(( CopyVertices), dim3(blocks), dim3(threads), 0, 0, hat, vertices->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
}else if (v->GetDimension() == 4){
hipMemcpy(vertices->GetDeviceData(), v->GetVoidData(), size * sizeof(float4), hipMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
}else
throw Exception("Deux the fuck");
normals = new CUDADataBlock<float4>(size);
if (n->GetDimension() == 3){
hipMemcpy(hat, n->GetVoidDataPtr(), size * sizeof(float3), hipMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
hipLaunchKernelGGL(( CopyNormals), dim3(blocks), dim3(threads), 0, 0, hat, normals->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
}else
throw Exception("Quad the fuck");
hipFree(hat);
colors = new CUDADataBlock<uchar4>(size);
if (c != NULL){
if (c->GetDimension() == 3){
float3 *hat;
hipMalloc(&hat, size * sizeof(float3));
hipMemcpy(hat, c->GetVoidDataPtr(), size * sizeof(float3), hipMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
hipLaunchKernelGGL(( CopyColors), dim3(blocks), dim3(threads), 0, 0, hat, colors->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
hipFree(hat);
}else if (c->GetDimension() == 4){
float4 *hat;
hipMalloc(&hat, size * sizeof(float4));
hipMemcpy(hat, c->GetVoidDataPtr(), size * sizeof(float4), hipMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
hipLaunchKernelGGL(( CopyColors), dim3(blocks), dim3(threads), 0, 0, hat, colors->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
hipFree(hat);
}
CHECK_FOR_CUDA_ERROR();
}else
hipLaunchKernelGGL(( SetColor), dim3(blocks), dim3(threads), 0, 0, make_uchar4(180, 180, 180, 255), colors->GetDeviceData(), size);
indices = new CUDADataBlock<unsigned int>(i->GetSize());
hipMemcpy(indices->GetDeviceData(), i->GetData(), i->GetSize() * sizeof(unsigned int), hipMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
}
const std::string CUDAMeshNode::ToString() const {
return "Vertices: " + vertices->ToString() +
"\nNormals: " + normals->ToString() +
"\nColors: " + colors->ToString() +
"\nIndices: " + indices->ToString();
}
}
}
| 87500b64a3f8eaa5634e64c61801817f52eb8f14.cu | // CUDA Mesh Node.
// -------------------------------------------------------------------
// Copyright (C) 2010 OpenEngine.dk (See AUTHORS)
//
// This program is free software; It is covered by the GNU General
// Public License version 2 or any later version.
// See the GNU General Public License for more details (see LICENSE).
//--------------------------------------------------------------------
#include <Scene/CUDAMeshNode.h>
#include <Geometry/GeometrySet.h>
#include <Geometry/Mesh.h>
#include <Resources/IDataBlock.h>
#include <Scene/MeshNode.h>
#include <Utils/CUDA/Convert.h>
#include <Utils/CUDA/Utils.h>
namespace OpenEngine {
using namespace Resources;
using namespace Resources::CUDA;
namespace Scene {
__global__ void CopyVertices(float3* vertIn,
float4* vertOut,
const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
vertOut[id] = make_float4(vertIn[id], 1.0);
}
}
__global__ void CopyNormals(float3* normIn,
float4* normOut,
const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
normOut[id] = make_float4(normIn[id], 0.0);
}
}
__global__ void CopyColors(float3* colorIn, uchar4 *colorOut, const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
float3 c = colorIn[id];
colorOut[id] = make_uchar4(c.x * 255, c.y * 255, c.z * 255, 255);
}
}
__global__ void CopyColors(float4* colorIn, uchar4 *colorOut, const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
float4 c = colorIn[id];
colorOut[id] = make_uchar4(c.x * 255, c.y * 255, c.z * 255, c.w * 255);
}
}
__global__ void SetColor(const uchar4 color, uchar4* colorOut, const int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
colorOut[id] = color;
}
}
CUDAMeshNode::CUDAMeshNode(MeshNode* mesh){
IDataBlockPtr v = mesh->GetMesh()->GetGeometrySet()->GetVertices();
IDataBlockPtr n = mesh->GetMesh()->GetGeometrySet()->GetNormals();
IDataBlockPtr c = mesh->GetMesh()->GetGeometrySet()->GetColors();
IndicesPtr i = mesh->GetMesh()->GetIndices();
unsigned int size = v->GetSize();
unsigned int blocks, threads;
Calc1DKernelDimensions(size, blocks, threads, 128);
// @TODO change to use mapped memory
float3 *hat;
cudaMalloc(&hat, size * sizeof(float3));
vertices = new CUDADataBlock<float4>(size);
if (v->GetDimension() == 3){
cudaMemcpy(hat, v->GetVoidDataPtr(), size * sizeof(float3), cudaMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
CopyVertices<<<blocks, threads>>>(hat, vertices->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
}else if (v->GetDimension() == 4){
cudaMemcpy(vertices->GetDeviceData(), v->GetVoidData(), size * sizeof(float4), cudaMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
}else
throw Exception("Deux the fuck");
normals = new CUDADataBlock<float4>(size);
if (n->GetDimension() == 3){
cudaMemcpy(hat, n->GetVoidDataPtr(), size * sizeof(float3), cudaMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
CopyNormals<<<blocks, threads>>>(hat, normals->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
}else
throw Exception("Quad the fuck");
cudaFree(hat);
colors = new CUDADataBlock<uchar4>(size);
if (c != NULL){
if (c->GetDimension() == 3){
float3 *hat;
cudaMalloc(&hat, size * sizeof(float3));
cudaMemcpy(hat, c->GetVoidDataPtr(), size * sizeof(float3), cudaMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
CopyColors<<<blocks, threads>>>(hat, colors->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
cudaFree(hat);
}else if (c->GetDimension() == 4){
float4 *hat;
cudaMalloc(&hat, size * sizeof(float4));
cudaMemcpy(hat, c->GetVoidDataPtr(), size * sizeof(float4), cudaMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
CopyColors<<<blocks, threads>>>(hat, colors->GetDeviceData(), size);
CHECK_FOR_CUDA_ERROR();
cudaFree(hat);
}
CHECK_FOR_CUDA_ERROR();
}else
SetColor<<<blocks, threads>>>(make_uchar4(180, 180, 180, 255), colors->GetDeviceData(), size);
indices = new CUDADataBlock<unsigned int>(i->GetSize());
cudaMemcpy(indices->GetDeviceData(), i->GetData(), i->GetSize() * sizeof(unsigned int), cudaMemcpyHostToDevice);
CHECK_FOR_CUDA_ERROR();
}
const std::string CUDAMeshNode::ToString() const {
return "Vertices: " + vertices->ToString() +
"\nNormals: " + normals->ToString() +
"\nColors: " + colors->ToString() +
"\nIndices: " + indices->ToString();
}
}
}
|
243cd6524c288db478200d0fa274b97112143e40.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPEvent.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <THH/THH.h>
namespace at {
namespace native {
using namespace at::cuda;
// device-to-device copy, does type conversion
void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool same_conj = iter.tensor(0).is_conj() == iter.tensor(1).is_conj();
bool same_neg = iter.tensor(0).is_neg() == iter.tensor(1).is_neg();
bool memcpy_eligible = same_type && same_conj && same_neg && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
HIPGuardMasqueradingAsCUDA device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// hipMemcpyAsync on the default stream.
HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
void *dst = iter.data_ptr(0);
void *src = iter.data_ptr(1);
size_t size = numel * iter.element_size(0);
if (src != dst || src_device != dst_device) {
// Perform the copy
AT_CUDA_CHECK(hipMemcpyAsync(
dst, src, size,
hipMemcpyDeviceToDevice,
copy_stream));
}
} else {
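    // Element-wise fallback: a gpu_kernel loop performs the copy, letting the
    // TensorIterator handle any dtype conversion while the lambdas materialize
    // conjugate/negation bits that differ between source and destination.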
auto dtype = iter.dtype(0);
if (isQIntType(dtype)) {
AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; });
});
} else {
if (same_neg) {
if (!same_conj && same_type) {
AT_DISPATCH_COMPLEX_TYPES(
dtype, "copy_conj_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return std::conj(x); });
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBool, kBFloat16, dtype, "copy_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; });
});
}
} else {
if (!same_conj && same_type) {
AT_DISPATCH_COMPLEX_TYPES(
dtype, "copy_conj_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return std::conj(-x); });
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBool, kBFloat16, dtype, "copy_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return -x; });
});
}
}
}
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
}
AT_CUDA_CHECK(hipGetLastError());
}
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use hipMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// propagate the correct conjugate bit
dst_contig._set_conj(dst.is_conj());
src_contig._set_conj(iter.tensor(1).is_conj());
dst_contig._set_neg(dst.is_neg());
src_contig._set_neg(iter.tensor(1).is_neg());
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
hipMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = hipMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = hipMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA();
if (non_blocking) {
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
at::cuda::memcpy_and_sync(dst, src, nbytes, kind, stream);
}
if (iter.tensor(0).is_conj() != iter.tensor(1).is_conj()) {
iter.tensor(0).conj_physical_();
}
if (iter.tensor(0).is_neg() != iter.tensor(1).is_neg()) {
iter.tensor(0).neg_();
}
}
REGISTER_DISPATCH(copy_stub, ©_kernel_cuda);
} // namespace native
} // namespace at
| 243cd6524c288db478200d0fa274b97112143e40.cu | #include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <THC/THC.h>
namespace at {
namespace native {
using namespace at::cuda;
// device-to-device copy, does type conversion
void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool same_conj = iter.tensor(0).is_conj() == iter.tensor(1).is_conj();
bool same_neg = iter.tensor(0).is_neg() == iter.tensor(1).is_neg();
bool memcpy_eligible = same_type && same_conj && same_neg && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
CUDAGuard device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// cudaMemcpyAsync on the default stream.
CUDAStream copy_stream = getCurrentCUDAStream(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentCUDAStream(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
void *dst = iter.data_ptr(0);
void *src = iter.data_ptr(1);
size_t size = numel * iter.element_size(0);
if (src != dst || src_device != dst_device) {
// Perform the copy
AT_CUDA_CHECK(cudaMemcpyAsync(
dst, src, size,
cudaMemcpyDeviceToDevice,
copy_stream));
}
} else {
auto dtype = iter.dtype(0);
if (isQIntType(dtype)) {
AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; });
});
} else {
if (same_neg) {
if (!same_conj && same_type) {
AT_DISPATCH_COMPLEX_TYPES(
dtype, "copy_conj_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return std::conj(x); });
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBool, kBFloat16, dtype, "copy_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; });
});
}
} else {
if (!same_conj && same_type) {
AT_DISPATCH_COMPLEX_TYPES(
dtype, "copy_conj_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return std::conj(-x); });
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBool, kBFloat16, dtype, "copy_", [&] {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return -x; });
});
}
}
}
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentCUDAStream(dst_device.index()));
}
AT_CUDA_CHECK(cudaGetLastError());
}
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use cudaMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// propagate the correct conjugate bit
dst_contig._set_conj(dst.is_conj());
src_contig._set_conj(iter.tensor(1).is_conj());
dst_contig._set_neg(dst.is_neg());
src_contig._set_neg(iter.tensor(1).is_neg());
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
cuda::OptionalCUDAGuard device_guard;
cudaMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = cudaMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = cudaMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
CUDAStream stream = getCurrentCUDAStream();
if (non_blocking) {
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
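    // Recording the stream against the CPU-side pointer keeps the caching host allocator
    // from reusing that pinned host buffer before the async copy above has completed
    // (this should be a no-op for host memory that was not allocated by that allocator).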
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
at::cuda::memcpy_and_sync(dst, src, nbytes, kind, stream);
}
if (iter.tensor(0).is_conj() != iter.tensor(1).is_conj()) {
iter.tensor(0).conj_physical_();
}
if (iter.tensor(0).is_neg() != iter.tensor(1).is_neg()) {
iter.tensor(0).neg_();
}
}
REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda);
} // namespace native
} // namespace at
|
241d9b7cb2f9b2fa2b4051f5a77a42214039f495.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include<iostream>
#include<fstream>
#include<ctime>
#include <hip/hip_runtime.h>
#define TILE_WIDTH 8
using namespace std;
//kernel function used by im2col(executed on GPU)
//reshape the feature map such that convolution can be executed as matrix multiplication
__global__ void im2colkernel(float* M, float* new_M, int N, int C, int H, int W, int K)
{
int H_ = H - K + 1;
int W_ = W - K + 1;
int bx = blockIdx.x;//the feature map number
int tx = threadIdx.x;//the index of partition on x axis
int ty = threadIdx.y;//the index of partition on y axis
	int tc = threadIdx.z;//the index of partition on **channel** axis
int x_range = H_ / blockDim.x;//suppose exact division
int y_range = W_ / blockDim.y;//suppose exact division
int c_range = C / blockDim.z;//suppose exact division
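	// Each thread copies an (x_range x y_range x c_range) sub-volume of feature map bx.
	// new_M is laid out as a [N][H_*W_][C*K*K] matrix -- one row per output pixel holding its
	// K*K*C receptive-field patch -- so multiplying it by the [C*K*K][F] matrix built in
	// reshape_convkernel performs the convolution as a single matrix product.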
for (int x = 0; x < x_range; x++)
{
for (int y = 0; y < y_range; y++)
{
for (int c = 0; c < c_range; c++)
{
for (int i = 0; i < K; i++)
{
for (int j = 0; j < K; j++)
{
new_M[bx * H_ * W_ * C * K * K + ((tx * x_range + x) * W_ + (ty * y_range + y)) * C * K * K + (tc * c_range + c) * K * K + i * K + j] = M[bx * C * H * W + (tc * c_range + c) * H * W + (tx * x_range + x) * W + (ty * y_range + y) + i * W + j];
}
}
}
}
}
}
//wrapper function of im2colkernel
//accept a normal pointer
//return a cuda pointer
float* im2col(float* M, int N, int C, int H, int W, int K)
{
float* Md, * new_M;
int H_ = H - K + 1;
int W_ = W - K + 1;
int size_M = N * C * H * W * sizeof(float);
int size_new_M = N * H_ * W_ * C * K * K * sizeof(float);
//Transfer M to device memory
hipMalloc((void**)&Md, size_M);
hipMemcpy(Md, M, size_M, hipMemcpyHostToDevice);
//allocate space for new_M
hipMalloc((void**)&new_M, size_new_M);
	dim3 dimBlock(6, 6, 8);//6 threads each for width and height, and 8 threads for channels
//Execute on GPU
im2colkernel << <8, dimBlock >> > (Md, new_M, N, C, H, W, K);//8 blocks to handle different feature maps
hipFree(Md);
return new_M;
}
//kernel function used by reshape_conv(executed on GPU)
//reshape the convolution kernel such that convolution can be executed as matrix multiplication
__global__ void reshape_convkernel(float* conv, float* new_conv, int F, int C, int K)
{
int bx = blockIdx.x;//the number of convmap(f)
	int tx = threadIdx.x;//the number of channel(c)
int b_range = F / gridDim.x;//suppose exact division
int t_range = C / blockDim.x;//suppose exact division
for (int f = 0; f < b_range; f++)
{
for (int c = 0; c < t_range; c++)
{
for (int i = 0; i < K; i++)
{
for (int j = 0; j < K; j++)
{
new_conv[((c + t_range * tx) * K * K + i * K + j) * F + (f + b_range * bx)] = conv[(f + b_range * bx) * C * K * K + (c + t_range * tx) * K * K + i * K + j];
}
}
}
}
}
//wrapper function of reshape_convkernel
//accept and return a cuda pointer
float* reshape_conv(float* conv, int F, int C, int K)
{
float* convd, * new_conv;
int size_convd = K * K * C * F * sizeof(float);
int size_new_conv = K * K * C * F * sizeof(float);
//Transfer conv to device memory
hipMalloc((void**)&convd, size_convd);
hipMemcpy(convd, conv, size_new_conv, hipMemcpyHostToDevice);
//allocate space for new_conv
hipMalloc((void**)&new_conv, size_new_conv);
//Execute on GPU
	reshape_convkernel << <32, 32 >> > (convd, new_conv, F, C, K);// 32 blocks for F and 32 threads for channels
hipFree(convd);
return new_conv;
}
//kernel function to reshape the output of matrix multiplication into the final result of convolution
//this will be invoked in MatrixMultiplication function
__global__ void reshape_outputkernel(float* P,float* ans,int size_P, int F, int N, int H_, int W_)
{
int bx = blockIdx.x;//the number of feature map(N)
int by = blockIdx.y;//the number for F
int tx = threadIdx.x;//the number of height
int ty = threadIdx.y;//the number of width
int bx_range = N / gridDim.x;//suppose exact division
int by_range = F / gridDim.y;//suppose exact division
int tx_range = H_ / blockDim.x;//suppose exact division
int ty_range = W_ / blockDim.y;//suppose exact division
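	// P arrives from the matrix multiply laid out as [N][H_][W_][F]; this kernel transposes
	// it into the conventional [N][F][H_][W_] output tensor 'ans'.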
for (int n = 0; n < bx_range; n++)
{
for (int h_ = 0; h_ < tx_range; h_++)
{
for (int w_ = 0; w_ < ty_range; w_++)
{
for (int f = 0; f < by_range; f++)
{
ans[(n+ bx_range*bx) * F * H_ * W_ + (f+by_range*by) * H_ * W_ + (h_+ tx_range*tx) * W_ + (w_+ ty_range*ty)] = P[(n + bx_range * bx) * F * H_ * W_ + (h_ + tx_range * tx) * W_ * F + (w_ + ty_range * ty) * F + (f + by_range * by)];
}
}
}
}
}
//kernel function to execute matrix multiplication(tiled)
//this will be invoked in MatrixMultiplication function
__global__ void Matrixkernel(float* Md, float* Nd, float* Pd, int m, int k, int n)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
	for (int i = 0; i < (k - 1) / TILE_WIDTH + 1; i++)//here i<(k - 1) / TILE_WIDTH + 1, this ensures that every element in Pd will be calculated
{
if (Row < m && i * TILE_WIDTH + tx < k)//this if clause ensures no out-of-bounds in Md
Mds[ty][tx] = Md[Row * k + (i * TILE_WIDTH + tx)];
else
Mds[ty][tx] = 0;
if (i * TILE_WIDTH + ty < k && Col < n)//this if clause ensures no out-of-bounds in Nd
Nds[ty][tx] = Nd[Col + (i * TILE_WIDTH + ty) * n];
else
Nds[ty][tx] = 0;
__syncthreads();
for (int j = 0; j < TILE_WIDTH; j++)
{
Pvalue += Mds[ty][j] * Nds[j][tx];
}
__syncthreads();
}
if (Row < m && Col < n)//assign only when the index is legal
Pd[Row * n + Col] = Pvalue;
}
//main function for matrix multiplication.
//the result of M*M1 will be stored in P
//it will invoke Matrixkernel and reshape_outputkernel
void MatrixMultiplication(float* M, float* M1, float* P, int m, int k, int n,int F,int N,int H_,int W_)
{
//M shape:m*k
//N shape:k*n
//P shape:m*n
float* Pd;
int size_P = m * n * sizeof(float);
//Allocate P on the device
hipMalloc((void**)&Pd, size_P);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid((n - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1);
//Execute on GPU
Matrixkernel << <dimGrid, dimBlock >> > (M, M1, Pd, m, k, n);
float* ans;//hold the reshaped result of Pd
	hipMalloc((void**)&ans, size_P);
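	// Note: the two dim3 names below are swapped relative to their roles -- in the launch,
	// dimBlock1(2, 8) is passed as the *grid* and dimGrid1(6, 6) as the *block*. With the
	// sizes set in main (N=8, F=128, H_=W_=126) every per-thread range still divides exactly,
	// so the whole output is covered.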
dim3 dimBlock1(2, 8);
dim3 dimGrid1(6, 6);
reshape_outputkernel << <dimBlock1, dimGrid1 >> > (Pd,ans, size_P, F, N, H_, W_);
//Transfer P from device to host
hipMemcpy(P, ans, size_P, hipMemcpyDeviceToHost);
hipFree(M); hipFree(M1); hipFree(Pd); hipFree(ans);//need to free M M1, they were allocated in other functions!
}
//7 loop version of convolution
//on CPU
void naiveconv(float* M, float* kernel, float* ans, int N, int C, int H, int W, int K, int F, int H_, int W_)
{
for (int n = 0; n < N; n++)
{
for (int f = 0; f < F; f++)
{
for (int c = 0; c < C; c++)
{
for (int h = 0; h < H_; h++)
{
for (int w = 0; w < W_; w++)
{
for (int i = 0; i < K; i++)
{
for (int j = 0; j < K; j++)
{
ans[n * F * H_ * W_ + f * H_ * W_ + h * W_ + w] += M[n * C * H * W + c * H * W + (h + i) * W + (w + j)] * kernel[f * C * K * K + c * K * K + i * K + j];
}
}
}
}
}
}
}
}
//record the output tensor into log file
void log_output(float* ans, int N, int F, int H_, int W_)
{
ofstream out("./log");
out << "the answer of the convolution is shown below:" << endl << endl;
for (int i = 0; i < N; i++)
{
out << "img" << i << endl;
for (int j = 0; j < F; j++)
{
out << "channel" << j << endl;
for (int k = 0; k < H_; k++)
{
for (int h = 0; h < W_; h++)
{
out << ans[i * F * H_ * W_ + j * H_ * W_ + k * W_ + h] << " ";
}
out << endl;
}
out << endl;
}
out << endl;
}
out.close();
}
int main()
{
//standard
int N = 8;
int C = 64;
int H = 128;
int W = 128;
int F = 128;
int K = 3;
//test
/*int N = 2;
int C = 3;
int H = 5;
int W = 5;
int F = 2;
int K = 3;*/
int H_ = H - K + 1;
int W_ = W - K + 1;
float* M = new float[N * C * H * W];
for (int i = 0; i < N * C * H * W; i++)
{
M[i] = 2;
}
float* kernel = new float[F * C * K * K];
for (int i = 0; i < F * C * K * K; i++)
{
kernel[i] = 2;
}
clock_t startTime, endTime;
	startTime = clock();// start timing
//new_M:point to cuda memory
float* new_M = im2col(M, N, C, H, W, K);
//new_kernel:point to cuda memory
float* new_kernel = reshape_conv(kernel, F, C, K);
//ans:normal pointer
float* ans = new float[N * F * H_ * W_];
MatrixMultiplication(new_M, new_kernel, ans, N * H_ * W_, K * K * C, F, F, N, H_, W_);
	endTime = clock();// stop timing
cout << "Problem 2: The run time is: " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
cout << "now writing ans into log..." << endl;
log_output(ans, N, F, H_, W_);
//delete the allocated space!
delete M;
delete kernel;
delete ans;
cout << "finish";
return 0;
} | 241d9b7cb2f9b2fa2b4051f5a77a42214039f495.cu | #include <cstdio>
#include<iostream>
#include<fstream>
#include<ctime>
#include <cuda_runtime.h>
#define TILE_WIDTH 8
using namespace std;
//kernel function used by im2col(executed on GPU)
//reshape the feature map such that convolution can be executed as matrix multiplication
__global__ void im2colkernel(float* M, float* new_M, int N, int C, int H, int W, int K)
{
int H_ = H - K + 1;
int W_ = W - K + 1;
int bx = blockIdx.x;//the feature map number
int tx = threadIdx.x;//the index of partition on x axis
int ty = threadIdx.y;//the index of partition on y axis
	int tc = threadIdx.z;//the index of partition on **channel** axis
int x_range = H_ / blockDim.x;//suppose exact division
int y_range = W_ / blockDim.y;//suppose exact division
int c_range = C / blockDim.z;//suppose exact division
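	// Each thread copies an (x_range x y_range x c_range) sub-volume of feature map bx.
	// new_M is laid out as a [N][H_*W_][C*K*K] matrix -- one row per output pixel holding its
	// K*K*C receptive-field patch -- so multiplying it by the [C*K*K][F] matrix built in
	// reshape_convkernel performs the convolution as a single matrix product.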
for (int x = 0; x < x_range; x++)
{
for (int y = 0; y < y_range; y++)
{
for (int c = 0; c < c_range; c++)
{
for (int i = 0; i < K; i++)
{
for (int j = 0; j < K; j++)
{
new_M[bx * H_ * W_ * C * K * K + ((tx * x_range + x) * W_ + (ty * y_range + y)) * C * K * K + (tc * c_range + c) * K * K + i * K + j] = M[bx * C * H * W + (tc * c_range + c) * H * W + (tx * x_range + x) * W + (ty * y_range + y) + i * W + j];
}
}
}
}
}
}
//wrapper function of im2colkernel
//accept a normal pointer
//return a cuda pointer
float* im2col(float* M, int N, int C, int H, int W, int K)
{
float* Md, * new_M;
int H_ = H - K + 1;
int W_ = W - K + 1;
int size_M = N * C * H * W * sizeof(float);
int size_new_M = N * H_ * W_ * C * K * K * sizeof(float);
//Transfer M to device memory
cudaMalloc((void**)&Md, size_M);
cudaMemcpy(Md, M, size_M, cudaMemcpyHostToDevice);
//allocate space for new_M
cudaMalloc((void**)&new_M, size_new_M);
	dim3 dimBlock(6, 6, 8);//6 threads each for width and height, and 8 threads for channels
//Execute on GPU
im2colkernel << <8, dimBlock >> > (Md, new_M, N, C, H, W, K);//8 blocks to handle different feature maps
cudaFree(Md);
return new_M;
}
//kernel function used by reshape_conv(executed on GPU)
//reshape the convolution kernel such that convolution can be executed as matrix multiplication
__global__ void reshape_convkernel(float* conv, float* new_conv, int F, int C, int K)
{
int bx = blockIdx.x;//the number of convmap(f)
	int tx = threadIdx.x;//the number of channel(c)
int b_range = F / gridDim.x;//suppose exact division
int t_range = C / blockDim.x;//suppose exact division
for (int f = 0; f < b_range; f++)
{
for (int c = 0; c < t_range; c++)
{
for (int i = 0; i < K; i++)
{
for (int j = 0; j < K; j++)
{
new_conv[((c + t_range * tx) * K * K + i * K + j) * F + (f + b_range * bx)] = conv[(f + b_range * bx) * C * K * K + (c + t_range * tx) * K * K + i * K + j];
}
}
}
}
}
//wrapper function of reshape_convkernel
//accept and return a cuda pointer
float* reshape_conv(float* conv, int F, int C, int K)
{
float* convd, * new_conv;
int size_convd = K * K * C * F * sizeof(float);
int size_new_conv = K * K * C * F * sizeof(float);
//Transfer conv to device memory
cudaMalloc((void**)&convd, size_convd);
cudaMemcpy(convd, conv, size_new_conv, cudaMemcpyHostToDevice);
//allocate space for new_conv
cudaMalloc((void**)&new_conv, size_new_conv);
//Execute on GPU
	reshape_convkernel << <32, 32 >> > (convd, new_conv, F, C, K);// 32 blocks for F and 32 threads for channels
cudaFree(convd);
return new_conv;
}
//kernel function to reshape the output of matrix multiplication into the final result of convolution
//this will be invoked in MatrixMultiplication function
__global__ void reshape_outputkernel(float* P,float* ans,int size_P, int F, int N, int H_, int W_)
{
int bx = blockIdx.x;//the number of feature map(N)
int by = blockIdx.y;//the number for F
int tx = threadIdx.x;//the number of height
int ty = threadIdx.y;//the number of width
int bx_range = N / gridDim.x;//suppose exact division
int by_range = F / gridDim.y;//suppose exact division
int tx_range = H_ / blockDim.x;//suppose exact division
int ty_range = W_ / blockDim.y;//suppose exact division
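	// P arrives from the matrix multiply laid out as [N][H_][W_][F]; this kernel transposes
	// it into the conventional [N][F][H_][W_] output tensor 'ans'.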
for (int n = 0; n < bx_range; n++)
{
for (int h_ = 0; h_ < tx_range; h_++)
{
for (int w_ = 0; w_ < ty_range; w_++)
{
for (int f = 0; f < by_range; f++)
{
ans[(n+ bx_range*bx) * F * H_ * W_ + (f+by_range*by) * H_ * W_ + (h_+ tx_range*tx) * W_ + (w_+ ty_range*ty)] = P[(n + bx_range * bx) * F * H_ * W_ + (h_ + tx_range * tx) * W_ * F + (w_ + ty_range * ty) * F + (f + by_range * by)];
}
}
}
}
}
//kernel function to execute matrix multiplication(tiled)
//this will be invoked in MatrixMultiplication function
__global__ void Matrixkernel(float* Md, float* Nd, float* Pd, int m, int k, int n)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
	for (int i = 0; i < (k - 1) / TILE_WIDTH + 1; i++)//here i<(k - 1) / TILE_WIDTH + 1, this ensures that every element in Pd will be calculated
{
if (Row < m && i * TILE_WIDTH + tx < k)//this if clause ensures no out-of-bounds in Md
Mds[ty][tx] = Md[Row * k + (i * TILE_WIDTH + tx)];
else
Mds[ty][tx] = 0;
if (i * TILE_WIDTH + ty < k && Col < n)//this if clause ensures no out-of-bounds in Nd
Nds[ty][tx] = Nd[Col + (i * TILE_WIDTH + ty) * n];
else
Nds[ty][tx] = 0;
__syncthreads();
for (int j = 0; j < TILE_WIDTH; j++)
{
Pvalue += Mds[ty][j] * Nds[j][tx];
}
__syncthreads();
}
if (Row < m && Col < n)//assign only when the index is legal
Pd[Row * n + Col] = Pvalue;
}
//main function for matrix multiplication.
//the result of M*M1 will be stored in P
//it will invoke Matrixkernel and reshape_outputkernel
void MatrixMultiplication(float* M, float* M1, float* P, int m, int k, int n,int F,int N,int H_,int W_)
{
//M shape:m*k
//N shape:k*n
//P shape:m*n
float* Pd;
int size_P = m * n * sizeof(float);
//Allocate P on the device
cudaMalloc((void**)&Pd, size_P);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid((n - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1);
//Execute on GPU
Matrixkernel << <dimGrid, dimBlock >> > (M, M1, Pd, m, k, n);
float* ans;//hold the reshaped result of Pd
	cudaMalloc((void**)&ans, size_P);
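	// Note: the two dim3 names below are swapped relative to their roles -- in the launch,
	// dimBlock1(2, 8) is passed as the *grid* and dimGrid1(6, 6) as the *block*. With the
	// sizes set in main (N=8, F=128, H_=W_=126) every per-thread range still divides exactly,
	// so the whole output is covered.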
dim3 dimBlock1(2, 8);
dim3 dimGrid1(6, 6);
reshape_outputkernel << <dimBlock1, dimGrid1 >> > (Pd,ans, size_P, F, N, H_, W_);
//Transfer P from device to host
cudaMemcpy(P, ans, size_P, cudaMemcpyDeviceToHost);
cudaFree(M); cudaFree(M1); cudaFree(Pd); cudaFree(ans);//need to free M M1, they were allocated in other functions!
}
//7 loop version of convolution
//on CPU
void naiveconv(float* M, float* kernel, float* ans, int N, int C, int H, int W, int K, int F, int H_, int W_)
{
for (int n = 0; n < N; n++)
{
for (int f = 0; f < F; f++)
{
for (int c = 0; c < C; c++)
{
for (int h = 0; h < H_; h++)
{
for (int w = 0; w < W_; w++)
{
for (int i = 0; i < K; i++)
{
for (int j = 0; j < K; j++)
{
ans[n * F * H_ * W_ + f * H_ * W_ + h * W_ + w] += M[n * C * H * W + c * H * W + (h + i) * W + (w + j)] * kernel[f * C * K * K + c * K * K + i * K + j];
}
}
}
}
}
}
}
}
//record the output tensor into log file
void log_output(float* ans, int N, int F, int H_, int W_)
{
ofstream out("./log");
out << "the answer of the convolution is shown below:" << endl << endl;
for (int i = 0; i < N; i++)
{
out << "img" << i << endl;
for (int j = 0; j < F; j++)
{
out << "channel" << j << endl;
for (int k = 0; k < H_; k++)
{
for (int h = 0; h < W_; h++)
{
out << ans[i * F * H_ * W_ + j * H_ * W_ + k * W_ + h] << " ";
}
out << endl;
}
out << endl;
}
out << endl;
}
out.close();
}
int main()
{
//standard
int N = 8;
int C = 64;
int H = 128;
int W = 128;
int F = 128;
int K = 3;
//test
/*int N = 2;
int C = 3;
int H = 5;
int W = 5;
int F = 2;
int K = 3;*/
int H_ = H - K + 1;
int W_ = W - K + 1;
float* M = new float[N * C * H * W];
for (int i = 0; i < N * C * H * W; i++)
{
M[i] = 2;
}
float* kernel = new float[F * C * K * K];
for (int i = 0; i < F * C * K * K; i++)
{
kernel[i] = 2;
}
clock_t startTime, endTime;
	startTime = clock();// start timing
//new_M:point to cuda memory
float* new_M = im2col(M, N, C, H, W, K);
//new_kernel:point to cuda memory
float* new_kernel = reshape_conv(kernel, F, C, K);
//ans:normal pointer
float* ans = new float[N * F * H_ * W_];
MatrixMultiplication(new_M, new_kernel, ans, N * H_ * W_, K * K * C, F, F, N, H_, W_);
	endTime = clock();// stop timing
cout << "Problem 2: The run time is: " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
cout << "now writing ans into log..." << endl;
log_output(ans, N, F, H_, W_);
//delete the allocated space!
delete M;
delete kernel;
delete ans;
cout << "finish";
return 0;
} |
fddbff1750e2ae5eb9eaeda26a6d9eab0a47a1b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
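// Launch sketch (an assumption -- the host-side launch lives elsewhere, e.g. in the Jitify
// host program): one thread per element with a 1-D grid, relying on the
// `kernelIndex < elements` guard to handle the final partial block.
//
//   const size_t block = 256;
//   const size_t grid  = (elements + block - 1) / block;
//   hipLaunchKernelGGL(HIP_KERNEL_NAME(addArrays<float, float, float>),
//                      dim3(grid), dim3(block), 0, 0, elements, dst, lhs, rhs);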
template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] + rhs;
        // printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
    }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
}
| fddbff1750e2ae5eb9eaeda26a6d9eab0a47a1b6.cu | // Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] + rhs;
        // printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
    }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
}
|
2b1f43dba8ecc09ae8aba88e3f49076cf45099f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
//__global__ void hello_kernel()
//{
// printf("Hello cuda world \n");
//}
//int main()
//{
// printf("hello from main \n");
//
// dim3 block();
//
// hello_kernel <<< 1, 1 >>> ();
//
// hipDeviceSynchronize();
// hipDeviceReset();
// return 0;
//} | 2b1f43dba8ecc09ae8aba88e3f49076cf45099f8.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
//__global__ void hello_kernel()
//{
// printf("Hello cuda world \n");
//}
//int main()
//{
// printf("hello from main \n");
//
// dim3 block();
//
// hello_kernel <<< 1, 1 >>> ();
//
// cudaDeviceSynchronize();
// cudaDeviceReset();
// return 0;
//} |
a0745fefc70cef48a97e04498471be11480db5ba.hip | // !!! This is a file automatically generated by hipify!!!
#include <sstream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <fstream>
#include <hip/hip_runtime_api.h>
#include "CDevice.hh"
#include "LaunchCommon.hh" // mkdirp
#include "PLOG.hh"
const plog::Severity CDevice::LEVEL = PLOG::EnvLevel("CDevice", "DEBUG");
const char* CDevice::CVD = "CUDA_VISIBLE_DEVICES" ;
const char* CDevice::desc() const
{
std::stringstream ss ;
// uuid is not printable
ss << "CDevice"
<< " index " << index
<< " ordinal " << ordinal
<< " name " << name
<< " major " << major
<< " minor " << minor
<< " compute_capability " << compute_capability
<< " multiProcessorCount " << multiProcessorCount
<< " totalGlobalMem " << totalGlobalMem
;
std::string s = ss.str();
return strdup(s.c_str());
}
bool CDevice::matches(const CDevice& other) const
{
return strncmp(other.uuid, uuid, sizeof(uuid)) == 0 && strncmp(other.name, name, sizeof(name)) == 0;
}
/**
CDevice::Collect
--------------------
Use CUDA API to collect a summary of the hipDeviceProp_t properties
regarding all attached devices into the vector of CDevice argument.
When ordinal_from_index=true the CDevice.ordinal value is taken
from the index in the order returned by hipGetDeviceProperties(&p, i)
**/
void CDevice::Collect(std::vector<CDevice>& devices, bool ordinal_from_index)
{
int devCount;
hipGetDeviceCount(&devCount);
LOG(LEVEL) << "hipGetDeviceCount : " << devCount ;
for (int i = 0; i < devCount; ++i)
{
hipDeviceProp_t p;
hipGetDeviceProperties(&p, i);
CDevice d ;
assert( sizeof(p.name) == sizeof(char)*256 ) ;
strncpy( d.name, p.name, sizeof(p.name) );
#ifndef CUDART_VERSION
#error CUDART_VERSION Undefined!
#elif (CUDART_VERSION >= 10000)
assert( sizeof(p.uuid) == sizeof(uuid) );
strncpy( d.uuid, p.uuid.bytes, sizeof(p.uuid) );
#elif (CUDART_VERSION >= 9000)
#endif
d.index = i ;
d.ordinal = ordinal_from_index ? i : -1 ;
d.major = p.major ;
d.minor = p.minor ;
d.compute_capability = p.major*10 + p.minor ;
d.multiProcessorCount = p.multiProcessorCount ;
d.totalGlobalMem = p.totalGlobalMem ;
devices.push_back(d);
}
}
int CDevice::Size()
{
return
sizeof(int) + // ordinal
sizeof(int) + // index
sizeof(char)*256 + // name
sizeof(char)*16 + // uuid
sizeof(int) + // major
sizeof(int) + // minor
sizeof(int) + // compute_capability
sizeof(int) + // multiProcessorCount
sizeof(size_t) ; // totalGlobalMem
}
void CDevice::write( std::ostream& out ) const
{
int size = Size();
char* buffer = new char[size];
char* p = buffer ;
memcpy( p, &ordinal, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( p, &index, sizeof(index) ) ; p += sizeof(index) ;
memcpy( p, name, sizeof(name) ) ; p += sizeof(name) ;
memcpy( p, uuid, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( p, &major, sizeof(major) ) ; p += sizeof(major) ;
memcpy( p, &minor, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( p, &compute_capability, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( p, &multiProcessorCount, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( p, &totalGlobalMem, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
out.write(buffer, size);
assert( p - buffer == size );
delete [] buffer ;
}
void CDevice::read( std::istream& in )
{
int size = Size();
char* buffer = new char[size];
in.read(buffer, size);
char* p = buffer ;
memcpy( &ordinal, p, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( &index, p, sizeof(index) ) ; p += sizeof(index) ;
memcpy( name, p, sizeof(name) ) ; p += sizeof(name) ;
memcpy( uuid, p, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( &major, p, sizeof(major) ) ; p += sizeof(major) ;
memcpy( &minor, p, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( &compute_capability, p, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( &multiProcessorCount,p, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( &totalGlobalMem, p, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
delete [] buffer ;
}
/**
CDevice::Visible
------------------
This assumes that the ordinal is the index when all GPUs are visible
and it finds this by arranging to persist the query when
CUDA_VISIBLE_DEVICES is not defined and use that to provide something
to match against when the envvar is defined.
Initially tried to do this in one go by changing envvar
and repeating the query. But that doesn't work,
presumably as the CUDA_VISIBLE_DEVICES value only has
any effect when cuda is initialized.
Of course the disadvantage of this approach
is that one needs to arrange to persist the full device list
at some initialization time and to find an
appropriate place for the file.
The purpose is for reference running, especially performance
scanning : so it's acceptable to require running a metadata
capturing executable prior to scanning.
Possibly NVML can provide a better solution, see nvml-
Actually maybe not : the NVML enumeration order follows nvidia-smi
not CUDA.
**/
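// Usage sketch (assumes CDevice::Visible is declared static in CDevice.hh and that the same
// dirpath is used for both runs; the directory name below is illustrative only):
//
//   std::vector<CDevice> all_devs ;
//   CDevice::Visible(all_devs, "/tmp/CDeviceTest", false); // CVD unset: saves CDevice.bin listing all GPUs
//
//   // in a later process started with e.g. CUDA_VISIBLE_DEVICES=1 :
//   std::vector<CDevice> vis ;
//   CDevice::Visible(vis, "/tmp/CDeviceTest", false);      // vis[0].ordinal is now the index in the saved full list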
void CDevice::Visible(std::vector<CDevice>& visible, const char* dirpath, bool nosave)
{
char* cvd = getenv(CVD);
bool no_cvd = cvd == NULL ;
std::vector<CDevice> all ;
bool ordinal_from_index = no_cvd ;
Collect(visible, ordinal_from_index);
if( no_cvd )
{
LOG(LEVEL) << " no_cvd " ;
if(!nosave)
Save( visible, dirpath );
}
else
{
LOG(LEVEL) << " with cvd " << cvd ;
Load(all, dirpath);
for(unsigned i=0 ; i < visible.size() ; i++)
{
CDevice& v = visible[i] ;
v.ordinal = FindIndexOfMatchingDevice( v, all );
}
}
}
/**
CDevice::FindIndexOfMatchingDevice
------------------------------------
**/
int CDevice::FindIndexOfMatchingDevice( const CDevice& d, const std::vector<CDevice>& all )
{
int index = -1 ;
LOG(LEVEL)
<< " d " << d.desc()
<< " all.size " << all.size()
;
for(unsigned i=0 ; i < all.size() ; i++)
{
const CDevice& a = all[i] ;
bool m = a.matches(d) ;
LOG(LEVEL)
<< " a " << a.desc()
<< " m " << m
;
if(m)
{
index = a.index ;
break ;
}
}
LOG(LEVEL) << " index : " << index ;
return index ;
}
void CDevice::Dump( const std::vector<CDevice>& devices, const char* msg )
{
LOG(info) << msg << "[" << Brief(devices) << "]" ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
LOG(info) << d.desc();
}
}
const char* CDevice::FILENAME = "CDevice.bin" ;
std::string CDevice::Path(const char* dirpath)
{
std::stringstream ss ;
if( dirpath ) ss << dirpath << "/" ;
ss << FILENAME ;
return ss.str();
}
void CDevice::PrepDir(const char* dirpath)
{
mkdirp(dirpath, 0777);
}
void CDevice::Save( const std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
PrepDir(dirpath);
LOG(LEVEL) << "path " << path ;
std::ofstream out(path.c_str(), std::ofstream::binary);
if(out.fail())
{
LOG(error) << " failed open for " << path ;
return ;
}
for(unsigned i = 0 ; i < devices.size() ; ++i )
{
const CDevice& d = devices[i] ;
d.write(out);
}
}
void CDevice::Load( std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
LOG(LEVEL)
<< "dirpath " << dirpath
<< "path " << path
;
std::ifstream in(path.c_str(), std::ofstream::binary);
CDevice d ;
while(true)
{
d.read(in);
if(in.eof()) return ;
if(in.fail())
{
LOG(error) << " failed read from " << path ;
return ;
}
devices.push_back(d);
}
}
std::string CDevice::Brief( const std::vector<CDevice>& devices )
{
std::stringstream ss ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
ss << d.ordinal << ':' ;
for(unsigned j=0 ; j < strlen(d.name) ; j++)
{
char c = *(d.name+j) ;
ss << ( c == ' ' ? '_' : c ) ;
}
if( i < devices.size() - 1 ) ss << ' ' ;
}
return ss.str();
}
| a0745fefc70cef48a97e04498471be11480db5ba.cu |
#include <sstream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <fstream>
#include <cuda_runtime_api.h>
#include "CDevice.hh"
#include "LaunchCommon.hh" // mkdirp
#include "PLOG.hh"
const plog::Severity CDevice::LEVEL = PLOG::EnvLevel("CDevice", "DEBUG");
const char* CDevice::CVD = "CUDA_VISIBLE_DEVICES" ;
const char* CDevice::desc() const
{
std::stringstream ss ;
// uuid is not printable
ss << "CDevice"
<< " index " << index
<< " ordinal " << ordinal
<< " name " << name
<< " major " << major
<< " minor " << minor
<< " compute_capability " << compute_capability
<< " multiProcessorCount " << multiProcessorCount
<< " totalGlobalMem " << totalGlobalMem
;
std::string s = ss.str();
return strdup(s.c_str());
}
bool CDevice::matches(const CDevice& other) const
{
return strncmp(other.uuid, uuid, sizeof(uuid)) == 0 && strncmp(other.name, name, sizeof(name)) == 0;
}
/**
CDevice::Collect
--------------------
Use CUDA API to collect a summary of the cudaDeviceProp properties
regarding all attached devices into the vector of CDevice argument.
When ordinal_from_index=true the CDevice.ordinal value is taken
from the index in the order returned by cudaGetDeviceProperties(&p, i)
**/
void CDevice::Collect(std::vector<CDevice>& devices, bool ordinal_from_index)
{
int devCount;
cudaGetDeviceCount(&devCount);
LOG(LEVEL) << "cudaGetDeviceCount : " << devCount ;
for (int i = 0; i < devCount; ++i)
{
cudaDeviceProp p;
cudaGetDeviceProperties(&p, i);
CDevice d ;
assert( sizeof(p.name) == sizeof(char)*256 ) ;
strncpy( d.name, p.name, sizeof(p.name) );
#ifndef CUDART_VERSION
#error CUDART_VERSION Undefined!
#elif (CUDART_VERSION >= 10000)
assert( sizeof(p.uuid) == sizeof(uuid) );
strncpy( d.uuid, p.uuid.bytes, sizeof(p.uuid) );
#elif (CUDART_VERSION >= 9000)
#endif
d.index = i ;
d.ordinal = ordinal_from_index ? i : -1 ;
d.major = p.major ;
d.minor = p.minor ;
d.compute_capability = p.major*10 + p.minor ;
d.multiProcessorCount = p.multiProcessorCount ;
d.totalGlobalMem = p.totalGlobalMem ;
devices.push_back(d);
}
}
int CDevice::Size()
{
return
sizeof(int) + // ordinal
sizeof(int) + // index
sizeof(char)*256 + // name
sizeof(char)*16 + // uuid
sizeof(int) + // major
sizeof(int) + // minor
sizeof(int) + // compute_capability
sizeof(int) + // multiProcessorCount
sizeof(size_t) ; // totalGlobalMem
}
void CDevice::write( std::ostream& out ) const
{
int size = Size();
char* buffer = new char[size];
char* p = buffer ;
memcpy( p, &ordinal, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( p, &index, sizeof(index) ) ; p += sizeof(index) ;
memcpy( p, name, sizeof(name) ) ; p += sizeof(name) ;
memcpy( p, uuid, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( p, &major, sizeof(major) ) ; p += sizeof(major) ;
memcpy( p, &minor, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( p, &compute_capability, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( p, &multiProcessorCount, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( p, &totalGlobalMem, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
out.write(buffer, size);
assert( p - buffer == size );
delete [] buffer ;
}
void CDevice::read( std::istream& in )
{
int size = Size();
char* buffer = new char[size];
in.read(buffer, size);
char* p = buffer ;
memcpy( &ordinal, p, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( &index, p, sizeof(index) ) ; p += sizeof(index) ;
memcpy( name, p, sizeof(name) ) ; p += sizeof(name) ;
memcpy( uuid, p, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( &major, p, sizeof(major) ) ; p += sizeof(major) ;
memcpy( &minor, p, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( &compute_capability, p, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( &multiProcessorCount,p, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( &totalGlobalMem, p, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
delete [] buffer ;
}
/**
CDevice::Visible
------------------
This assumes that the ordinal is the index when all GPUs are visible
and it finds this by arranging to persist the query when
CUDA_VISIBLE_DEVICES is not defined and use that to provide something
to match against when the envvar is defined.
Initially tried to do this in one go by changing envvar
and repeating the query. But that doesn't work,
presumably as the CUDA_VISIBLE_DEVICES value only has
any effect when cuda is initialized.
Of course the disadvantage of this approach
is that one needs to arrange to persist the full device list
at some initialization time and to find an
appropriate place for the file.
The purpose is for reference running, especially performance
scanning : so it's acceptable to require running a metadata
capturing executable prior to scanning.
Possibly NVML can provide a better solution, see nvml-
Actually maybe not : the NVML enumeration order follows nvidia-smi
not CUDA.
**/
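// Usage sketch (assumes CDevice::Visible is declared static in CDevice.hh and that the same
// dirpath is used for both runs; the directory name below is illustrative only):
//
//   std::vector<CDevice> all_devs ;
//   CDevice::Visible(all_devs, "/tmp/CDeviceTest", false); // CVD unset: saves CDevice.bin listing all GPUs
//
//   // in a later process started with e.g. CUDA_VISIBLE_DEVICES=1 :
//   std::vector<CDevice> vis ;
//   CDevice::Visible(vis, "/tmp/CDeviceTest", false);      // vis[0].ordinal is now the index in the saved full list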
void CDevice::Visible(std::vector<CDevice>& visible, const char* dirpath, bool nosave)
{
char* cvd = getenv(CVD);
bool no_cvd = cvd == NULL ;
std::vector<CDevice> all ;
bool ordinal_from_index = no_cvd ;
Collect(visible, ordinal_from_index);
if( no_cvd )
{
LOG(LEVEL) << " no_cvd " ;
if(!nosave)
Save( visible, dirpath );
}
else
{
LOG(LEVEL) << " with cvd " << cvd ;
Load(all, dirpath);
for(unsigned i=0 ; i < visible.size() ; i++)
{
CDevice& v = visible[i] ;
v.ordinal = FindIndexOfMatchingDevice( v, all );
}
}
}
/**
CDevice::FindIndexOfMatchingDevice
------------------------------------
**/
int CDevice::FindIndexOfMatchingDevice( const CDevice& d, const std::vector<CDevice>& all )
{
int index = -1 ;
LOG(LEVEL)
<< " d " << d.desc()
<< " all.size " << all.size()
;
for(unsigned i=0 ; i < all.size() ; i++)
{
const CDevice& a = all[i] ;
bool m = a.matches(d) ;
LOG(LEVEL)
<< " a " << a.desc()
<< " m " << m
;
if(m)
{
index = a.index ;
break ;
}
}
LOG(LEVEL) << " index : " << index ;
return index ;
}
void CDevice::Dump( const std::vector<CDevice>& devices, const char* msg )
{
LOG(info) << msg << "[" << Brief(devices) << "]" ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
LOG(info) << d.desc();
}
}
const char* CDevice::FILENAME = "CDevice.bin" ;
std::string CDevice::Path(const char* dirpath)
{
std::stringstream ss ;
if( dirpath ) ss << dirpath << "/" ;
ss << FILENAME ;
return ss.str();
}
void CDevice::PrepDir(const char* dirpath)
{
mkdirp(dirpath, 0777);
}
void CDevice::Save( const std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
PrepDir(dirpath);
LOG(LEVEL) << "path " << path ;
std::ofstream out(path.c_str(), std::ofstream::binary);
if(out.fail())
{
LOG(error) << " failed open for " << path ;
return ;
}
for(unsigned i = 0 ; i < devices.size() ; ++i )
{
const CDevice& d = devices[i] ;
d.write(out);
}
}
void CDevice::Load( std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
LOG(LEVEL)
<< "dirpath " << dirpath
<< "path " << path
;
std::ifstream in(path.c_str(), std::ofstream::binary);
CDevice d ;
while(true)
{
d.read(in);
if(in.eof()) return ;
if(in.fail())
{
LOG(error) << " failed read from " << path ;
return ;
}
devices.push_back(d);
}
}
std::string CDevice::Brief( const std::vector<CDevice>& devices )
{
std::stringstream ss ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
ss << d.ordinal << ':' ;
for(unsigned j=0 ; j < strlen(d.name) ; j++)
{
char c = *(d.name+j) ;
ss << ( c == ' ' ? '_' : c ) ;
}
if( i < devices.size() - 1 ) ss << ' ' ;
}
return ss.str();
}
|
8b579dcf932aa07b2b9144e80556e4ca1399e623.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include "cmeans.h"
#include "cmeans_kernel.cu"
#include "MDL.h"
#include "timers.h"
#include <rocblas.h>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
void printCudaError() {
hipError_t error = hipGetLastError();
if(error != hipSuccess) {
printf("%s\n",hipGetErrorString(error));
}
}
bool InitCUDA(void)
{
int count = 0;
int i = 0;
int device = -1;
int num_procs = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
printf("There are %d devices.\n",count);
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
printf("Device #%d - %s, Version: %d.%d\n",i,prop.name,prop.major,prop.minor);
// Check if CUDA capable device
if(prop.major >= 1) {
if(prop.multiProcessorCount > num_procs) {
device = i;
num_procs = prop.multiProcessorCount;
}
}
}
}
if(device == -1) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
device = DEVICE;
printf("Using Device %d\n",device);
CUDA_SAFE_CALL(hipSetDevice(device));
DEBUG("CUDA initialized.\n");
return true;
}
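// Note: the loop above selects the CUDA capable device with the most multiprocessors,
// but "device = DEVICE;" then overrides that choice with the compile-time constant
// (presumably from cmeans.h), so the scan effectively just reports device capabilities.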
#endif
unsigned int timer_io; // Timer for I/O, such as reading FCS file and outputting result files
unsigned int timer_memcpy; // Timer for GPU <---> CPU memory copying
unsigned int timer_cpu; // Timer for processing on CPU
unsigned int timer_gpu; // Timer for kernels on the GPU
unsigned int timer_total; // Total time
/************************************************************************/
/* C-means Main */
/************************************************************************/
int main(int argc, char* argv[])
{
CUT_SAFE_CALL(cutCreateTimer(&timer_io));
CUT_SAFE_CALL(cutCreateTimer(&timer_memcpy));
CUT_SAFE_CALL(cutCreateTimer(&timer_cpu));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu));
CUT_SAFE_CALL(cutCreateTimer(&timer_total));
if(!InitCUDA()) {
return 0;
}
CUT_SAFE_CALL(cutStartTimer(timer_total));
CUT_SAFE_CALL(cutStartTimer(timer_io));
// [program name] [data file]
if(argc != 2){
printf("Usage: %s data.csv\n",argv[0]);
return 1;
}
DEBUG("Parsing input file\n");
float* myEvents = ParseSampleInput(argv[1]);
if(myEvents == NULL){
printf("Error reading input file. Exiting.\n");
return 1;
}
DEBUG("Finished parsing input file\n");
CUT_SAFE_CALL(cutStopTimer(timer_io));
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
//cublasStatus status;
//status = hipblasInit();
//if(status != HIPBLAS_STATUS_SUCCESS) {
// printf("!!! CUBLAS initialization error\n");
//}
// Seed random generator, used for choosing initial cluster centers
srand((unsigned)(time(0)));
//srand(42);
float* myClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
float* newClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
clock_t total_start;
total_start = clock();
// Select random cluster centers
DEBUG("Randomly choosing initial cluster centers.\n");
generateInitialClusters(myClusters, myEvents);
// Transpose the events matrix
// Threads within a block access consecutive events, not consecutive dimensions
// So we need the data aligned this way for coalesced global reads of event data
DEBUG("Transposing data matrix.\n");
float* transposedEvents = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
for(int i=0; i<NUM_EVENTS; i++) {
for(int j=0; j<NUM_DIMENSIONS; j++) {
transposedEvents[j*NUM_EVENTS+i] = myEvents[i*NUM_DIMENSIONS+j];
}
}
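/* Illustrative note: with the transposed layout, dimension d of event e lives at
   transposedEvents[d*NUM_EVENTS + e]. In a kernel where thread tid handles event tid,
   neighbouring threads then read neighbouring addresses,
       float x = d_E[d*NUM_EVENTS + tid];        // stride-1 across the warp, coalesced
   whereas the original row-major layout would give a NUM_DIMENSIONS-element stride,
       float x = d_E[tid*NUM_DIMENSIONS + d];    // uncoalesced
   The indexing shown is a sketch of the idea, not a quote from the kernels. */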
float* memberships = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_EVENTS);
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
int size;
#if !CPU_ONLY
DEBUG("Allocating memory on GPU.\n");
float* d_distanceMatrix;
CUDA_SAFE_CALL(hipMalloc((void**)&d_distanceMatrix, sizeof(float)*NUM_EVENTS*NUM_CLUSTERS));
#if !LINEAR
float* d_memberships;
CUDA_SAFE_CALL(hipMalloc((void**)&d_memberships, sizeof(float)*NUM_EVENTS*NUM_CLUSTERS));
#endif
float* d_E;
CUDA_SAFE_CALL(hipMalloc((void**)&d_E, sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS));
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**)&d_C, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_nC;
CUDA_SAFE_CALL(hipMalloc((void**)&d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_sizes;
CUDA_SAFE_CALL(hipMalloc((void**)&d_sizes, sizeof(float)*NUM_CLUSTERS));
float* sizes = (float*) malloc(sizeof(float)*NUM_CLUSTERS);
size = sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS;
CUDA_SAFE_CALL(hipMemcpy(d_E, transposedEvents, size, hipMemcpyHostToDevice)); // temporary
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
DEBUG("Copying input data to GPU.\n");
size = sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS;
//CUDA_SAFE_CALL(hipMemcpy(d_E, myEvents, size, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_E, transposedEvents, size, hipMemcpyHostToDevice));
DEBUG("Copying initial cluster centers to GPU.\n");
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
#endif
clock_t cpu_start, cpu_stop;
float diff, max_change;
cpu_start = clock();
PRINT("Starting C-means\n");
float averageTime = 0;
int iterations = 0;
// memory size for cluster centers
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
int num_blocks_distance = NUM_EVENTS / NUM_THREADS_DISTANCE;
if(NUM_EVENTS % NUM_THREADS_DISTANCE) {
num_blocks_distance++;
}
int num_blocks_membership = NUM_EVENTS / NUM_THREADS_MEMBERSHIP;
if(NUM_EVENTS % NUM_THREADS_MEMBERSHIP) { // membership kernel uses its own thread count
num_blocks_membership++;
}
int num_blocks_update = NUM_CLUSTERS / NUM_CLUSTERS_PER_BLOCK;
if(NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_blocks_update++;
}
do{
#if CPU_ONLY
clock_t start,stop;
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
DEBUG("Starting UpdateCenters kernel.\n");
//start = clock();
//UpdateClusterCentersCPU_Naive(myClusters, myEvents, newClusters);
//stop = clock();
//printf("Processing time for Method 1: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
#if !LINEAR
start = clock();
UpdateClusterCentersCPU_Optimized(myClusters, myEvents, newClusters);
stop = clock();
DEBUG("Processing time for Quadratic Method: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
#else
start = clock();
UpdateClusterCentersCPU_Linear(myClusters, myEvents, newClusters);
stop = clock();
DEBUG("Processing time for Linear Method: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
#endif
DEBUG("Processing time for CPU: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
averageTime += (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3; // accumulate this iteration's CPU time
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
#else
unsigned int timer = 0;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStartTimer(timer_gpu));
DEBUG("Launching ComputeDistanceMatrix kernel\n");
hipLaunchKernelGGL(( ComputeDistanceMatrix), dim3(dim3(num_blocks_distance,NUM_CLUSTERS)), dim3(NUM_THREADS_DISTANCE) , 0, 0, d_C, d_E, d_distanceMatrix);
//ComputeDistanceMatrixNoShared<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix);
#if LINEAR
// Optimized, O(M)
DEBUG("Launching ComputeMembershipLinearMatrix kernel\n");
hipLaunchKernelGGL(( ComputeMembershipMatrixLinear), dim3(num_blocks_membership), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix);
hipLaunchKernelGGL(( UpdateClusterCentersGPU2), dim3(dim3(num_blocks_update,NUM_DIMENSIONS)), dim3(NUM_THREADS_UPDATE) , 0, 0, d_C, d_E, d_nC, d_distanceMatrix);
#else
// Using unoptimized, O(M^2)
DEBUG("Launching ComputeMembershipMatrix kernel\n");
hipLaunchKernelGGL(( ComputeMembershipMatrix), dim3(dim3(num_blocks_membership,NUM_CLUSTERS)), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, d_memberships);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships);
hipLaunchKernelGGL(( UpdateClusterCentersGPU2), dim3(dim3(num_blocks_update,NUM_DIMENSIONS)), dim3(NUM_THREADS_UPDATE) , 0, 0, d_C, d_E, d_nC, d_memberships);
#endif
hipDeviceSynchronize();
// CUBLAS SGEMM: data*transpose(memberships)
// Transposes are flipped in SGEMM call b/c routine expects column-major (fortran style) data
/*
hipblasSgemm('t','n',NUM_DIMENSIONS,NUM_CLUSTERS,NUM_EVENTS,1.0,d_E,NUM_EVENTS,d_distanceMatrix,NUM_EVENTS,0.0,d_nC,NUM_DIMENSIONS);
status = hipblasGetError();
if(status != HIPBLAS_STATUS_SUCCESS) {
printf("Cublas kernel error!\n");
return 1;
}
hipDeviceSynchronize();
*/
//hipblasSgemv('t',NUM_EVENTS,NUM_DIMENSIONS,1.0,d_E,NUM_EVENTS,d_distanceMatrix,1,0,d_nC,1);
DEBUG(hipGetErrorString(hipGetLastError()));
DEBUG("\n");
CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
DEBUG("Copying centers from GPU\n");
CUDA_SAFE_CALL(hipMemcpy(newClusters, d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS, hipMemcpyDeviceToHost));
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
// Still need to calculate denominators and divide to get actual centers
CUT_SAFE_CALL(cutStartTimer(timer_gpu));
#if LINEAR
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_distanceMatrix, d_sizes );
#else
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_memberships, d_sizes );
#endif
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
hipMemcpy(sizes,d_sizes,sizeof(float)*NUM_CLUSTERS, hipMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
for(int i=0; i < NUM_CLUSTERS; i++) {
DEBUG("Size %d: %f\n",i,sizes[i]);
}
for(int i=0; i < NUM_CLUSTERS; i++) {
for(int j=0; j < NUM_DIMENSIONS; j++) {
newClusters[i*NUM_DIMENSIONS+j] /= sizes[i];
}
}
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
CUT_SAFE_CALL(cutStopTimer(timer));
float thisTime = cutGetTimerValue(timer);
DEBUG("Iteration time for GPU: %f (ms) \n", thisTime);
averageTime += thisTime;
CUT_SAFE_CALL(cutDeleteTimer(timer));
#endif
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
diff = 0.0;
max_change = 0.0;
for(int i=0; i < NUM_CLUSTERS; i++){
DEBUG("Center %d: ",i);
for(int k = 0; k < NUM_DIMENSIONS; k++){
DEBUG("%.2f ",newClusters[i*NUM_DIMENSIONS + k]);
diff += fabs(myClusters[i*NUM_DIMENSIONS + k] - newClusters[i*NUM_DIMENSIONS + k]);
max_change = fmaxf(max_change, fabs(myClusters[i*NUM_DIMENSIONS + k] - newClusters[i*NUM_DIMENSIONS + k]));
myClusters[i*NUM_DIMENSIONS + k] = newClusters[i*NUM_DIMENSIONS + k];
}
DEBUG("\n");
}
DEBUG("Iteration %d, Total Change = %e, Max Change = %e\n", iterations, diff, max_change);
iterations++;
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
} while((iterations < MIN_ITERS) || (max_change > THRESHOLD && iterations < MAX_ITERS));
#if !CPU_ONLY
DEBUG("Computing final memberships\n");
//CUT_SAFE_CALL(cutStartTimer(timer_gpu));
hipLaunchKernelGGL(( ComputeDistanceMatrix), dim3(dim3(num_blocks_distance,NUM_CLUSTERS)), dim3(NUM_THREADS_DISTANCE) , 0, 0, d_C, d_E, d_distanceMatrix);
#if LINEAR
hipLaunchKernelGGL(( ComputeNormalizedMembershipMatrixLinear), dim3(num_blocks_membership), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix);
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_distanceMatrix, d_sizes );
#else
hipLaunchKernelGGL(( ComputeNormalizedMembershipMatrix), dim3(dim3(num_blocks_membership,NUM_CLUSTERS)), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, d_memberships);
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_memberships, d_sizes );
#endif
hipDeviceSynchronize();
//CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
hipMemcpy(sizes,d_sizes,sizeof(float)*NUM_CLUSTERS, hipMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
DEBUG("Copying memberships from GPU\n");
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
#if LINEAR
CUDA_SAFE_CALL(hipMemcpy(memberships,d_distanceMatrix,sizeof(float)*NUM_CLUSTERS*NUM_EVENTS,hipMemcpyDeviceToHost));
#else
CUDA_SAFE_CALL(hipMemcpy(memberships,d_memberships,sizeof(float)*NUM_CLUSTERS*NUM_EVENTS,hipMemcpyDeviceToHost));
#endif
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
#endif
if(iterations == MAX_ITERS){
PRINT("Warning: Did not converge to the %f threshold provided\n", THRESHOLD);
PRINT("Last total change was: %e\n",diff);
PRINT("Last maximum change was: %e\n",max_change);
} else {
PRINT("Converged after iterations: %d\n",iterations);
}
cpu_stop = clock();
CUT_SAFE_CALL(cutStartTimer(timer_io));
averageTime /= iterations;
printf("\nTotal Processing time: %f (s) \n", (float)(cpu_stop - cpu_start)/(float)(CLOCKS_PER_SEC));
printf("\n");
CUT_SAFE_CALL(cutStopTimer(timer_io));
int* finalClusterConfig;
float mdlTime = 0;
#if ENABLE_MDL
#if CPU_ONLY
finalClusterConfig = MDL(myEvents, myClusters, &mdlTime, argv[1]);
#else
finalClusterConfig = MDLGPU(d_E, d_nC, d_distanceMatrix, &mdlTime, argv[1]);
mdlTime /= 1000.0; // CUDA timer returns time in milliseconds, normalize to seconds
#endif
#else
finalClusterConfig = (int*) malloc(sizeof(int)*NUM_CLUSTERS);
memset(finalClusterConfig,1,sizeof(int)*NUM_CLUSTERS);
#endif
CUT_SAFE_CALL(cutStartTimer(timer_io));
// Filters out the final clusters (Based on MDL)
PRINT("Final Clusters are:\n");
int newCount = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
if(finalClusterConfig[i]){
#if !CPU_ONLY
PRINT("N=%.1f\n",newCount,sizes[i]);
#endif
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[newCount * NUM_DIMENSIONS + j] = myClusters[i*NUM_DIMENSIONS + j];
PRINT("%.2f\t", myClusters[i*NUM_DIMENSIONS + j]);
}
newCount++;
PRINT("\n");
}
}
#if ENABLE_OUTPUT
ReportSummary(newClusters, newCount, argv[1]);
ReportResults(myEvents, memberships, newCount, argv[1]);
#endif
CUT_SAFE_CALL(cutStopTimer(timer_io));
free(newClusters);
free(myClusters);
free(myEvents);
#if !CPU_ONLY
CUDA_SAFE_CALL(hipFree(d_E));
CUDA_SAFE_CALL(hipFree(d_C));
CUDA_SAFE_CALL(hipFree(d_nC));
#endif
CUT_SAFE_CALL(cutStopTimer(timer_total));
printf("\n\n");
printf("Total Time (ms): %f\n",cutGetTimerValue(timer_total));
printf("I/O Time (ms): %f\n",cutGetTimerValue(timer_io));
printf("CPU processing Time (ms): %f\n",cutGetTimerValue(timer_cpu));
printf("GPU processing Time (ms): %f\n",cutGetTimerValue(timer_gpu));
printf("GPU memcpy Time (ms): %f\n",cutGetTimerValue(timer_memcpy));
printf("\n\n");
return 0;
}
void generateInitialClusters(float* clusters, float* events){
int seed;
srand(time(NULL));
for(int i = 0; i < NUM_CLUSTERS; i++){
#if RANDOM_SEED
seed = rand() % NUM_EVENTS;
#else
seed = i * NUM_EVENTS / NUM_CLUSTERS;
#endif
for(int j = 0; j < NUM_DIMENSIONS; j++){
clusters[i*NUM_DIMENSIONS + j] = events[seed*NUM_DIMENSIONS + j];
}
}
}
__host__ float CalculateDistanceCPU(const float* clusters, const float* events, int clusterIndex, int eventIndex){
float sum = 0;
#if DISTANCE_MEASURE == 0
for(int i = 0; i < NUM_DIMENSIONS; i++){
float tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += tmp*tmp;
}
sum = sqrt(sum+1e-30);
#endif
#if DISTANCE_MEASURE == 1
for(int i = 0; i < NUM_DIMENSIONS; i++){
float tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += abs(tmp)+1e-30;
}
#endif
#if DISTANCE_MEASURE == 2
for(int i = 0; i < NUM_DIMENSIONS; i++){
float tmp = abs(events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]);
if(tmp > sum)
sum = tmp+1e-30;
}
#endif
return sum;
}
__host__ float MembershipValue(const float* clusters, const float* events, int clusterIndex, int eventIndex){
float myClustDist = CalculateDistanceCPU(clusters, events, clusterIndex, eventIndex);
float sum =0;
float otherClustDist;
for(int j = 0; j< NUM_CLUSTERS; j++){
otherClustDist = CalculateDistanceCPU(clusters, events, j, eventIndex);
sum += powf((float)(myClustDist/otherClustDist),(2.0f/(FUZZINESS-1.0f)));
}
return 1.0f/sum;
}
void UpdateClusterCentersCPU_Linear(const float* oldClusters, const float* events, float* newClusters){
//float membershipValue, sum, denominator;
float membershipValue, denominator;
float* numerator = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* denominators = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* distances = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* memberships = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
for(int i = 0; i < NUM_DIMENSIONS*NUM_CLUSTERS; i++) {
newClusters[i] = 0;
}
for(int i = 0; i < NUM_CLUSTERS; i++) {
numerator[i] = 0;
denominators[i] = 0;
}
for(int n = 0; n < NUM_EVENTS; n++){
denominator = 0.0f;
for(int c = 0; c < NUM_CLUSTERS; c++){
distances[c] = CalculateDistanceCPU(oldClusters, events, c, n);
numerator[c] = powf(distances[c],2.0f/(FUZZINESS-1.0f))+1e-30; // prevents divide by zero error if distance is really small and powf makes it underflow
denominator = denominator + 1.0f/numerator[c];
}
// Add contribution to numerator and denominator
for(int c = 0; c < NUM_CLUSTERS; c++){
membershipValue = 1.0f/powf(numerator[c]*denominator,(float)FUZZINESS);
for(int d = 0; d < NUM_DIMENSIONS; d++){
newClusters[c*NUM_DIMENSIONS+d] += events[n*NUM_DIMENSIONS+d]*membershipValue;
}
denominators[c] += membershipValue;
}
}
// Final cluster centers
for(int c = 0; c < NUM_CLUSTERS; c++){
for(int d = 0; d < NUM_DIMENSIONS; d++){
newClusters[c*NUM_DIMENSIONS + d] = newClusters[c*NUM_DIMENSIONS+d]/denominators[c];
}
}
free(numerator);
free(denominators);
free(distances);
free(memberships);
}
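/* Hedged derivation (not in the original source): the linear pass above rewrites the
   standard fuzzy c-means membership
       u_cn = 1 / sum_j (d_cn / d_jn)^(2/(FUZZINESS-1))
   as
       u_cn = 1 / ( d_cn^(2/(FUZZINESS-1)) * sum_j d_jn^(-2/(FUZZINESS-1)) )
            = 1 / ( numerator[c] * denominator )
   so each event needs a single O(NUM_CLUSTERS) pass instead of the O(NUM_CLUSTERS^2)
   ratio sums of the quadratic version. The weight applied to the event in the center
   update is u_cn^FUZZINESS, hence membershipValue = 1/powf(numerator[c]*denominator, FUZZINESS).
   The 1e-30 added to numerator[c] keeps a powf underflow from causing a divide by zero. */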
void UpdateClusterCentersCPU_Optimized(const float* oldClusters, const float* events, float* newClusters){
//float membershipValue, sum, denominator;
float membershipValue, denominator;
float* numerator = (float*)malloc(sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
float* denominators = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* distances = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* memberships = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
for(int i = 0; i < NUM_DIMENSIONS*NUM_CLUSTERS; i++)
numerator[i] = 0;
for(int i = 0; i < NUM_CLUSTERS; i++)
denominators[i] = 0;
float sum;
for(int n = 0; n < NUM_EVENTS; n++){
// Calculate distance to each cluster center
for(int c = 0; c < NUM_CLUSTERS; c++){
distances[c] = CalculateDistanceCPU(oldClusters, events, c, n);
}
// Convert distances into memberships
for(int c = 0; c < NUM_CLUSTERS; c++){
sum = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
sum += powf((float)(distances[c]/distances[i]),(2.0f/(FUZZINESS-1.0f)));
}
memberships[c] = 1.0f/sum;
}
// Add contribution to numerator and denominator
for(int c = 0; c < NUM_CLUSTERS; c++){
membershipValue = memberships[c]*memberships[c];
for(int d = 0; d < NUM_DIMENSIONS; d++){
numerator[c*NUM_DIMENSIONS+d] += events[n*NUM_DIMENSIONS+d]*membershipValue;
}
denominators[c] += membershipValue;
}
}
// Final cluster centers
for(int c = 0; c < NUM_CLUSTERS; c++){
for(int d = 0; d < NUM_DIMENSIONS; d++){
newClusters[c*NUM_DIMENSIONS + d] = numerator[c*NUM_DIMENSIONS+d]/denominators[c];
}
}
free(numerator);
free(denominators);
free(distances);
free(memberships);
}
void UpdateClusterCentersCPU_Naive(const float* oldClusters, const float* events, float* newClusters){
//float membershipValue, sum, denominator;
float membershipValue, denominator;
float* numerator = (float*)malloc(sizeof(float)*NUM_DIMENSIONS);
float* denominators = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* distances = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
for(int i = 0; i < NUM_CLUSTERS; i++){
denominator = 0.0;
for(int j = 0; j < NUM_DIMENSIONS; j++)
numerator[j] = 0;
for(int j = 0; j < NUM_EVENTS; j++){
membershipValue = MembershipValue(oldClusters, events, i, j);
for(int k = 0; k < NUM_DIMENSIONS; k++){
numerator[k] += events[j*NUM_DIMENSIONS + k]*membershipValue*membershipValue;
}
denominator += membershipValue;
}
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[i*NUM_DIMENSIONS + j] = numerator[j]/denominator;
}
}
free(numerator);
free(denominators);
free(distances);
}
float* readBIN(char* f) {
FILE* fin = fopen(f,"rb");
int nevents,ndims;
fread(&nevents,4,1,fin);
fread(&ndims,4,1,fin);
int num_elements = (ndims)*(nevents);
printf("Number of rows: %d\n",nevents);
printf("Number of cols: %d\n",ndims);
float* data = (float*) malloc(sizeof(float)*num_elements);
fread(data,sizeof(float),num_elements,fin);
fclose(fin);
return data;
}
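/* Sketch of a compatible writer (an assumption for illustration; writeBIN is not part of
   the original code). readBIN above implies a layout of two 4-byte integer counts
   followed by nevents*ndims row-major floats. */
static void writeBIN(const char* f, const float* data, int nevents, int ndims) {
    FILE* fout = fopen(f, "wb");
    if(fout == NULL) { perror("fopen"); return; }
    fwrite(&nevents, 4, 1, fout);                                // row count, 4-byte int
    fwrite(&ndims, 4, 1, fout);                                  // column count, 4-byte int
    fwrite(data, sizeof(float), (size_t)nevents*ndims, fout);    // payload
    fclose(fout);
}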
float* readCSV(char* filename) {
FILE* myfile = fopen(filename, "r");
if(myfile == NULL){
printf("Error: File DNE\n");
return NULL;
}
char myline[10000];
float* retVal = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
myfile = fopen(filename, "r");
#if LINE_LABELS
fgets(myline, 10000, myfile);
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 10000, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#else
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 10000, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#endif
fclose(myfile);
return retVal;
}
float* ParseSampleInput(char* f){
int length = strlen(f);
printf("File Extension: %s\n",f+length-3);
if(strcmp(f+length-3,"bin") == 0) {
return readBIN(f);
} else {
return readCSV(f);
}
}
void FreeMatrix(float* d_matrix){
CUDA_SAFE_CALL(hipFree(d_matrix));
}
float* BuildQGPU(float* d_events, float* d_clusters, float* d_distanceMatrix, float* mdlTime){
float* d_matrix;
int size = sizeof(float) * NUM_CLUSTERS*NUM_CLUSTERS;
unsigned int timer = 0;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
hipMalloc((void**)&d_matrix, size);
printCudaError();
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStartTimer(timer_gpu));
dim3 grid(NUM_CLUSTERS, NUM_CLUSTERS);
printf("Launching Q Matrix Kernel\n");
hipLaunchKernelGGL(( CalculateQMatrixGPUUpgrade), dim3(grid), dim3(Q_THREADS), 0, 0, d_events, d_clusters, d_matrix, d_distanceMatrix);
hipDeviceSynchronize();
printCudaError();
CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
float* matrix = (float*)malloc(size);
printf("Copying results to CPU\n");
hipError_t error = hipMemcpy(matrix, d_matrix, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printCudaError();
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStopTimer(timer));
*mdlTime = cutGetTimerValue(timer);
printf("Processing time for GPU: %f (ms) \n", *mdlTime);
CUT_SAFE_CALL(cutDeleteTimer(timer));
FreeMatrix(d_matrix);
printf("Q Matrix:\n");
for(int row=0; row < NUM_CLUSTERS; row++) {
for(int col=0; col < NUM_CLUSTERS; col++) {
printf("%f ",matrix[row*NUM_CLUSTERS+col]);
}
printf("\n");
}
return matrix;
}
| 8b579dcf932aa07b2b9144e80556e4ca1399e623.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include "cmeans.h"
#include "cmeans_kernel.cu"
#include "MDL.h"
#include "timers.h"
#include <cublas.h>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
void printCudaError() {
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
printf("%s\n",cudaGetErrorString(error));
}
}
bool InitCUDA(void)
{
int count = 0;
int i = 0;
int device = -1;
int num_procs = 0;
cudaGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
printf("There are %d devices.\n",count);
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
printf("Device #%d - %s, Version: %d.%d\n",i,prop.name,prop.major,prop.minor);
// Check if CUDA capable device
if(prop.major >= 1) {
if(prop.multiProcessorCount > num_procs) {
device = i;
num_procs = prop.multiProcessorCount;
}
}
}
}
if(device == -1) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
device = DEVICE;
printf("Using Device %d\n",device);
CUDA_SAFE_CALL(cudaSetDevice(device));
DEBUG("CUDA initialized.\n");
return true;
}
#endif
unsigned int timer_io; // Timer for I/O, such as reading FCS file and outputting result files
unsigned int timer_memcpy; // Timer for GPU <---> CPU memory copying
unsigned int timer_cpu; // Timer for processing on CPU
unsigned int timer_gpu; // Timer for kernels on the GPU
unsigned int timer_total; // Total time
/************************************************************************/
/* C-means Main */
/************************************************************************/
int main(int argc, char* argv[])
{
CUT_SAFE_CALL(cutCreateTimer(&timer_io));
CUT_SAFE_CALL(cutCreateTimer(&timer_memcpy));
CUT_SAFE_CALL(cutCreateTimer(&timer_cpu));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu));
CUT_SAFE_CALL(cutCreateTimer(&timer_total));
if(!InitCUDA()) {
return 0;
}
CUT_SAFE_CALL(cutStartTimer(timer_total));
CUT_SAFE_CALL(cutStartTimer(timer_io));
// [program name] [data file]
if(argc != 2){
printf("Usage: %s data.csv\n",argv[0]);
return 1;
}
DEBUG("Parsing input file\n");
float* myEvents = ParseSampleInput(argv[1]);
if(myEvents == NULL){
printf("Error reading input file. Exiting.\n");
return 1;
}
DEBUG("Finished parsing input file\n");
CUT_SAFE_CALL(cutStopTimer(timer_io));
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
//cublasStatus status;
//status = cublasInit();
//if(status != CUBLAS_STATUS_SUCCESS) {
// printf("!!! CUBLAS initialization error\n");
//}
// Seed random generator, used for choosing initial cluster centers
srand((unsigned)(time(0)));
//srand(42);
float* myClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
float* newClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
clock_t total_start;
total_start = clock();
// Select random cluster centers
DEBUG("Randomly choosing initial cluster centers.\n");
generateInitialClusters(myClusters, myEvents);
// Transpose the events matrix
// Threads within a block access consecutive events, not consecutive dimensions
// So we need the data aligned this way for coalesced global reads of event data
DEBUG("Transposing data matrix.\n");
float* transposedEvents = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
for(int i=0; i<NUM_EVENTS; i++) {
for(int j=0; j<NUM_DIMENSIONS; j++) {
transposedEvents[j*NUM_EVENTS+i] = myEvents[i*NUM_DIMENSIONS+j];
}
}
float* memberships = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_EVENTS);
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
int size;
#if !CPU_ONLY
DEBUG("Allocating memory on GPU.\n");
float* d_distanceMatrix;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_distanceMatrix, sizeof(float)*NUM_EVENTS*NUM_CLUSTERS));
#if !LINEAR
float* d_memberships;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_memberships, sizeof(float)*NUM_EVENTS*NUM_CLUSTERS));
#endif
float* d_E;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_E, sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS));
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_C, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_nC;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_sizes;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_sizes, sizeof(float)*NUM_CLUSTERS));
float* sizes = (float*) malloc(sizeof(float)*NUM_CLUSTERS);
size = sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS;
CUDA_SAFE_CALL(cudaMemcpy(d_E, transposedEvents, size, cudaMemcpyHostToDevice)); // temporary
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
DEBUG("Copying input data to GPU.\n");
size = sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS;
//CUDA_SAFE_CALL(cudaMemcpy(d_E, myEvents, size, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_E, transposedEvents, size, cudaMemcpyHostToDevice));
DEBUG("Copying initial cluster centers to GPU.\n");
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
#endif
clock_t cpu_start, cpu_stop;
float diff, max_change;
cpu_start = clock();
PRINT("Starting C-means\n");
float averageTime = 0;
int iterations = 0;
// memory size for cluster centers
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
int num_blocks_distance = NUM_EVENTS / NUM_THREADS_DISTANCE;
if(NUM_EVENTS % NUM_THREADS_DISTANCE) {
num_blocks_distance++;
}
int num_blocks_membership = NUM_EVENTS / NUM_THREADS_MEMBERSHIP;
if(NUM_EVENTS % NUM_THREADS_MEMBERSHIP) { // membership kernel uses its own thread count
num_blocks_membership++;
}
int num_blocks_update = NUM_CLUSTERS / NUM_CLUSTERS_PER_BLOCK;
if(NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_blocks_update++;
}
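/* Note: each of the three block counts above is a ceil-division; an equivalent
   one-line form (shown for clarity, not used by the original code) is
       int num_blocks_distance = (NUM_EVENTS + NUM_THREADS_DISTANCE - 1) / NUM_THREADS_DISTANCE;
   and likewise for num_blocks_membership and num_blocks_update. */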
do{
#if CPU_ONLY
clock_t start,stop;
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
DEBUG("Starting UpdateCenters kernel.\n");
//start = clock();
//UpdateClusterCentersCPU_Naive(myClusters, myEvents, newClusters);
//stop = clock();
//printf("Processing time for Method 1: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
#if !LINEAR
start = clock();
UpdateClusterCentersCPU_Optimized(myClusters, myEvents, newClusters);
stop = clock();
DEBUG("Processing time for Quadratic Method: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
#else
start = clock();
UpdateClusterCentersCPU_Linear(myClusters, myEvents, newClusters);
stop = clock();
DEBUG("Processing time for Linear Method: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
#endif
DEBUG("Processing time for CPU: %f (ms) \n", (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3);
averageTime += (float)(stop - start)/(float)(CLOCKS_PER_SEC)*(float)1e3; // accumulate this iteration's CPU time
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
#else
unsigned int timer = 0;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStartTimer(timer_gpu));
DEBUG("Launching ComputeDistanceMatrix kernel\n");
ComputeDistanceMatrix<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix);
//ComputeDistanceMatrixNoShared<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix);
#if LINEAR
// Optimized, O(M)
DEBUG("Launching ComputeMembershipLinearMatrix kernel\n");
ComputeMembershipMatrixLinear<<< num_blocks_membership, NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix);
UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix);
#else
// Using unoptimized, O(M^2)
DEBUG("Launching ComputeMembershipMatrix kernel\n");
ComputeMembershipMatrix<<< dim3(num_blocks_membership,NUM_CLUSTERS), NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, d_memberships);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships);
UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships);
#endif
cudaThreadSynchronize();
// CUBLAS SGEMM: data*transpose(memberships)
// Transposes are flipped in SGEMM call b/c routine expects column-major (fortran style) data
/*
cublasSgemm('t','n',NUM_DIMENSIONS,NUM_CLUSTERS,NUM_EVENTS,1.0,d_E,NUM_EVENTS,d_distanceMatrix,NUM_EVENTS,0.0,d_nC,NUM_DIMENSIONS);
status = cublasGetError();
if(status != CUBLAS_STATUS_SUCCESS) {
printf("Cublas kernel error!\n");
return 1;
}
cudaThreadSynchronize();
*/
//cublasSgemv('t',NUM_EVENTS,NUM_DIMENSIONS,1.0,d_E,NUM_EVENTS,d_distanceMatrix,1,0,d_nC,1);
DEBUG(cudaGetErrorString(cudaGetLastError()));
DEBUG("\n");
CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
DEBUG("Copying centers from GPU\n");
CUDA_SAFE_CALL(cudaMemcpy(newClusters, d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS, cudaMemcpyDeviceToHost));
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
// Still need to calculate denominators and divide to get actual centers
CUT_SAFE_CALL(cutStartTimer(timer_gpu));
#if LINEAR
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_distanceMatrix, d_sizes );
#else
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_memberships, d_sizes );
#endif
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
cudaMemcpy(sizes,d_sizes,sizeof(float)*NUM_CLUSTERS, cudaMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
for(int i=0; i < NUM_CLUSTERS; i++) {
DEBUG("Size %d: %f\n",i,sizes[i]);
}
for(int i=0; i < NUM_CLUSTERS; i++) {
for(int j=0; j < NUM_DIMENSIONS; j++) {
newClusters[i*NUM_DIMENSIONS+j] /= sizes[i];
}
}
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
CUT_SAFE_CALL(cutStopTimer(timer));
float thisTime = cutGetTimerValue(timer);
DEBUG("Iteration time for GPU: %f (ms) \n", thisTime);
averageTime += thisTime;
CUT_SAFE_CALL(cutDeleteTimer(timer));
#endif
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
diff = 0.0;
max_change = 0.0;
for(int i=0; i < NUM_CLUSTERS; i++){
DEBUG("Center %d: ",i);
for(int k = 0; k < NUM_DIMENSIONS; k++){
DEBUG("%.2f ",newClusters[i*NUM_DIMENSIONS + k]);
diff += fabs(myClusters[i*NUM_DIMENSIONS + k] - newClusters[i*NUM_DIMENSIONS + k]);
max_change = fmaxf(max_change, fabs(myClusters[i*NUM_DIMENSIONS + k] - newClusters[i*NUM_DIMENSIONS + k]));
myClusters[i*NUM_DIMENSIONS + k] = newClusters[i*NUM_DIMENSIONS + k];
}
DEBUG("\n");
}
DEBUG("Iteration %d, Total Change = %e, Max Change = %e\n", iterations, diff, max_change);
iterations++;
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
} while((iterations < MIN_ITERS) || (max_change > THRESHOLD && iterations < MAX_ITERS));
#if !CPU_ONLY
DEBUG("Computing final memberships\n");
//CUT_SAFE_CALL(cutStartTimer(timer_gpu));
ComputeDistanceMatrix<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix);
#if LINEAR
ComputeNormalizedMembershipMatrixLinear<<< num_blocks_membership, NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix);
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_distanceMatrix, d_sizes );
#else
ComputeNormalizedMembershipMatrix<<< dim3(num_blocks_membership,NUM_CLUSTERS), NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, d_memberships);
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_memberships, d_sizes );
#endif
cudaThreadSynchronize();
//CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
cudaMemcpy(sizes,d_sizes,sizeof(float)*NUM_CLUSTERS, cudaMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
DEBUG("Copying memberships from GPU\n");
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
#if LINEAR
CUDA_SAFE_CALL(cudaMemcpy(memberships,d_distanceMatrix,sizeof(float)*NUM_CLUSTERS*NUM_EVENTS,cudaMemcpyDeviceToHost));
#else
CUDA_SAFE_CALL(cudaMemcpy(memberships,d_memberships,sizeof(float)*NUM_CLUSTERS*NUM_EVENTS,cudaMemcpyDeviceToHost));
#endif
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
#endif
if(iterations == MAX_ITERS){
PRINT("Warning: Did not converge to the %f threshold provided\n", THRESHOLD);
PRINT("Last total change was: %e\n",diff);
PRINT("Last maximum change was: %e\n",max_change);
} else {
PRINT("Converged after iterations: %d\n",iterations);
}
cpu_stop = clock();
CUT_SAFE_CALL(cutStartTimer(timer_io));
averageTime /= iterations;
printf("\nTotal Processing time: %f (s) \n", (float)(cpu_stop - cpu_start)/(float)(CLOCKS_PER_SEC));
printf("\n");
CUT_SAFE_CALL(cutStopTimer(timer_io));
int* finalClusterConfig;
float mdlTime = 0;
#if ENABLE_MDL
#if CPU_ONLY
finalClusterConfig = MDL(myEvents, myClusters, &mdlTime, argv[1]);
#else
finalClusterConfig = MDLGPU(d_E, d_nC, d_distanceMatrix, &mdlTime, argv[1]);
mdlTime /= 1000.0; // CUDA timer returns time in milliseconds, normalize to seconds
#endif
#else
finalClusterConfig = (int*) malloc(sizeof(int)*NUM_CLUSTERS);
memset(finalClusterConfig,1,sizeof(int)*NUM_CLUSTERS);
#endif
CUT_SAFE_CALL(cutStartTimer(timer_io));
// Filters out the final clusters (Based on MDL)
PRINT("Final Clusters are:\n");
int newCount = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
if(finalClusterConfig[i]){
#if !CPU_ONLY
PRINT("N=%.1f\n",newCount,sizes[i]);
#endif
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[newCount * NUM_DIMENSIONS + j] = myClusters[i*NUM_DIMENSIONS + j];
PRINT("%.2f\t", myClusters[i*NUM_DIMENSIONS + j]);
}
newCount++;
PRINT("\n");
}
}
#if ENABLE_OUTPUT
ReportSummary(newClusters, newCount, argv[1]);
ReportResults(myEvents, memberships, newCount, argv[1]);
#endif
CUT_SAFE_CALL(cutStopTimer(timer_io));
free(newClusters);
free(myClusters);
free(myEvents);
#if !CPU_ONLY
CUDA_SAFE_CALL(cudaFree(d_E));
CUDA_SAFE_CALL(cudaFree(d_C));
CUDA_SAFE_CALL(cudaFree(d_nC));
#endif
CUT_SAFE_CALL(cutStopTimer(timer_total));
printf("\n\n");
printf("Total Time (ms): %f\n",cutGetTimerValue(timer_total));
printf("I/O Time (ms): %f\n",cutGetTimerValue(timer_io));
printf("CPU processing Time (ms): %f\n",cutGetTimerValue(timer_cpu));
printf("GPU processing Time (ms): %f\n",cutGetTimerValue(timer_gpu));
printf("GPU memcpy Time (ms): %f\n",cutGetTimerValue(timer_memcpy));
printf("\n\n");
return 0;
}
void generateInitialClusters(float* clusters, float* events){
int seed;
srand(time(NULL));
for(int i = 0; i < NUM_CLUSTERS; i++){
#if RANDOM_SEED
seed = rand() % NUM_EVENTS;
#else
seed = i * NUM_EVENTS / NUM_CLUSTERS;
#endif
for(int j = 0; j < NUM_DIMENSIONS; j++){
clusters[i*NUM_DIMENSIONS + j] = events[seed*NUM_DIMENSIONS + j];
}
}
}
__host__ float CalculateDistanceCPU(const float* clusters, const float* events, int clusterIndex, int eventIndex){
float sum = 0;
#if DISTANCE_MEASURE == 0
for(int i = 0; i < NUM_DIMENSIONS; i++){
float tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += tmp*tmp;
}
sum = sqrt(sum+1e-30);
#endif
#if DISTANCE_MEASURE == 1
for(int i = 0; i < NUM_DIMENSIONS; i++){
float tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += abs(tmp)+1e-30;
}
#endif
#if DISTANCE_MEASURE == 2
for(int i = 0; i < NUM_DIMENSIONS; i++){
float tmp = abs(events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]);
if(tmp > sum)
sum = tmp+1e-30;
}
#endif
return sum;
}
__host__ float MembershipValue(const float* clusters, const float* events, int clusterIndex, int eventIndex){
float myClustDist = CalculateDistanceCPU(clusters, events, clusterIndex, eventIndex);
float sum =0;
float otherClustDist;
for(int j = 0; j< NUM_CLUSTERS; j++){
otherClustDist = CalculateDistanceCPU(clusters, events, j, eventIndex);
sum += powf((float)(myClustDist/otherClustDist),(2.0f/(FUZZINESS-1.0f)));
}
return 1.0f/sum;
}
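/* Note (standard fuzzy c-means relation, stated here for clarity): MembershipValue
   computes u_ij = 1 / sum_k (d_ij / d_kj)^(2/(FUZZINESS-1)), the membership of event j
   in cluster i, where the sum runs over all clusters k. Memberships lie in (0,1] and
   sum to 1 over the clusters for each event, and the smallest distance yields the
   largest membership. */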
void UpdateClusterCentersCPU_Linear(const float* oldClusters, const float* events, float* newClusters){
//float membershipValue, sum, denominator;
float membershipValue, denominator;
float* numerator = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* denominators = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* distances = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* memberships = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
for(int i = 0; i < NUM_DIMENSIONS*NUM_CLUSTERS; i++) {
newClusters[i] = 0;
}
for(int i = 0; i < NUM_CLUSTERS; i++) {
numerator[i] = 0;
denominators[i] = 0;
}
for(int n = 0; n < NUM_EVENTS; n++){
denominator = 0.0f;
for(int c = 0; c < NUM_CLUSTERS; c++){
distances[c] = CalculateDistanceCPU(oldClusters, events, c, n);
numerator[c] = powf(distances[c],2.0f/(FUZZINESS-1.0f))+1e-30; // prevents divide by zero error if distance is really small and powf makes it underflow
denominator = denominator + 1.0f/numerator[c];
}
// Add contribution to numerator and denominator
for(int c = 0; c < NUM_CLUSTERS; c++){
membershipValue = 1.0f/powf(numerator[c]*denominator,(float)FUZZINESS);
for(int d = 0; d < NUM_DIMENSIONS; d++){
newClusters[c*NUM_DIMENSIONS+d] += events[n*NUM_DIMENSIONS+d]*membershipValue;
}
denominators[c] += membershipValue;
}
}
// Final cluster centers
for(int c = 0; c < NUM_CLUSTERS; c++){
for(int d = 0; d < NUM_DIMENSIONS; d++){
newClusters[c*NUM_DIMENSIONS + d] = newClusters[c*NUM_DIMENSIONS+d]/denominators[c];
}
}
free(numerator);
free(denominators);
free(distances);
free(memberships);
}
void UpdateClusterCentersCPU_Optimized(const float* oldClusters, const float* events, float* newClusters){
//float membershipValue, sum, denominator;
float membershipValue, denominator;
float* numerator = (float*)malloc(sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
float* denominators = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* distances = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* memberships = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
for(int i = 0; i < NUM_DIMENSIONS*NUM_CLUSTERS; i++)
numerator[i] = 0;
for(int i = 0; i < NUM_CLUSTERS; i++)
denominators[i] = 0;
float sum;
for(int n = 0; n < NUM_EVENTS; n++){
// Calculate distance to each cluster center
for(int c = 0; c < NUM_CLUSTERS; c++){
distances[c] = CalculateDistanceCPU(oldClusters, events, c, n);
}
// Convert distances into memberships
for(int c = 0; c < NUM_CLUSTERS; c++){
sum = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
sum += powf((float)(distances[c]/distances[i]),(2.0f/(FUZZINESS-1.0f)));
}
memberships[c] = 1.0f/sum;
}
// Add contribution to numerator and denominator
for(int c = 0; c < NUM_CLUSTERS; c++){
membershipValue = memberships[c]*memberships[c];
for(int d = 0; d < NUM_DIMENSIONS; d++){
numerator[c*NUM_DIMENSIONS+d] += events[n*NUM_DIMENSIONS+d]*membershipValue;
}
denominators[c] += membershipValue;
}
}
// Final cluster centers
for(int c = 0; c < NUM_CLUSTERS; c++){
for(int d = 0; d < NUM_DIMENSIONS; d++){
newClusters[c*NUM_DIMENSIONS + d] = numerator[c*NUM_DIMENSIONS+d]/denominators[c];
}
}
free(numerator);
free(denominators);
free(distances);
free(memberships);
}
void UpdateClusterCentersCPU_Naive(const float* oldClusters, const float* events, float* newClusters){
//float membershipValue, sum, denominator;
float membershipValue, denominator;
float* numerator = (float*)malloc(sizeof(float)*NUM_DIMENSIONS);
float* denominators = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
float* distances = (float*)malloc(sizeof(float)*NUM_CLUSTERS);
for(int i = 0; i < NUM_CLUSTERS; i++){
denominator = 0.0;
for(int j = 0; j < NUM_DIMENSIONS; j++)
numerator[j] = 0;
for(int j = 0; j < NUM_EVENTS; j++){
membershipValue = MembershipValue(oldClusters, events, i, j);
for(int k = 0; k < NUM_DIMENSIONS; k++){
numerator[k] += events[j*NUM_DIMENSIONS + k]*membershipValue*membershipValue;
}
denominator += membershipValue;
}
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[i*NUM_DIMENSIONS + j] = numerator[j]/denominator;
}
}
free(numerator);
free(denominators);
free(distances);
}
float* readBIN(char* f) {
FILE* fin = fopen(f,"rb");
int nevents,ndims;
fread(&nevents,4,1,fin);
fread(&ndims,4,1,fin);
int num_elements = (ndims)*(nevents);
printf("Number of rows: %d\n",nevents);
printf("Number of cols: %d\n",ndims);
float* data = (float*) malloc(sizeof(float)*num_elements);
fread(data,sizeof(float),num_elements,fin);
fclose(fin);
return data;
}
float* readCSV(char* filename) {
FILE* myfile = fopen(filename, "r");
if(myfile == NULL){
printf("Error: File DNE\n");
return NULL;
}
char myline[10000];
float* retVal = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
myfile = fopen(filename, "r");
#if LINE_LABELS
fgets(myline, 10000, myfile);
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 10000, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#else
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 10000, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#endif
fclose(myfile);
return retVal;
}
float* ParseSampleInput(char* f){
int length = strlen(f);
printf("File Extension: %s\n",f+length-3);
if(strcmp(f+length-3,"bin") == 0) {
return readBIN(f);
} else {
return readCSV(f);
}
}
void FreeMatrix(float* d_matrix){
CUDA_SAFE_CALL(cudaFree(d_matrix));
}
float* BuildQGPU(float* d_events, float* d_clusters, float* d_distanceMatrix, float* mdlTime){
float* d_matrix;
int size = sizeof(float) * NUM_CLUSTERS*NUM_CLUSTERS;
unsigned int timer = 0;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
cudaMalloc((void**)&d_matrix, size);
printCudaError();
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStartTimer(timer_gpu));
dim3 grid(NUM_CLUSTERS, NUM_CLUSTERS);
printf("Launching Q Matrix Kernel\n");
CalculateQMatrixGPUUpgrade<<<grid, Q_THREADS>>>(d_events, d_clusters, d_matrix, d_distanceMatrix);
cudaThreadSynchronize();
printCudaError();
CUT_SAFE_CALL(cutStopTimer(timer_gpu));
CUT_SAFE_CALL(cutStartTimer(timer_memcpy));
float* matrix = (float*)malloc(size);
printf("Copying results to CPU\n");
cudaError_t error = cudaMemcpy(matrix, d_matrix, size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
printCudaError();
CUT_SAFE_CALL(cutStopTimer(timer_memcpy));
CUT_SAFE_CALL(cutStopTimer(timer));
*mdlTime = cutGetTimerValue(timer);
printf("Processing time for GPU: %f (ms) \n", *mdlTime);
CUT_SAFE_CALL(cutDeleteTimer(timer));
FreeMatrix(d_matrix);
printf("Q Matrix:\n");
for(int row=0; row < NUM_CLUSTERS; row++) {
for(int col=0; col < NUM_CLUSTERS; col++) {
printf("%f ",matrix[row*NUM_CLUSTERS+col]);
}
printf("\n");
}
return matrix;
}
|
e0c90e8109fdd153e4de0c5149cf6fd4dcc90677.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "rdma.h"
#include "fifo.h"
}
#include <fcntl.h>
#include <unistd.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#define M 1536UL
#define N M
#define K M
#define LDB1 M
#define LDB2 LDB1*N
#define LDC1 N
#define SUB_M 512UL
#define HUGEPAGE_SZ (4UL * 1024UL * 1024UL * 1024UL)
#define AGGREGATED_SZ (SUB_M * SUB_M * SUB_M * 8UL)
// #define IO_QUEUE_SZ (HUGEPAGE_SZ / AGGREGATED_SZ / 2UL)
#define IO_QUEUE_SZ 2UL
#define NITERS 4UL
void print_config(struct config_t config);
struct fetch_conf {
struct resources *res;
uint64_t m, sub_m;
double *sub_B;
char *hugepage_addr;
struct fifo *request_queue;
struct fifo *io_queue;
};
struct request_conf {
struct resources *res;
struct fifo *complete_queue;
struct fifo *request_queue;
uint64_t id;
uint64_t m, sub_m;
};
struct fifo_entry {
double *sub_B;
};
int memcpyFromMmap(struct fetch_conf *conf, char *dst, const char *src, const size_t length) {
struct response *res = NULL;
res = sock_read_offset(conf->res->sock);
if (res == NULL) {
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
// if (res->id == 0) {
// printf("fetching row [%lu:%lu]\n", res->x, res->y);
// } else {
// printf("fetching col [%lu:%lu]\n", res->x, res->y);
// }
memcpy(dst, src + res->offset, length);
free(res);
if (sock_write_data(conf->res->sock)) { /* just send a dummy char back and forth */
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
return 0;
}
void *fetch_thread(void *args) {
struct fetch_conf *conf = (struct fetch_conf *) args;
uint64_t n, m, k;
uint64_t dsize = SUB_M * SUB_M * SUB_M;
double *ptr;
struct fifo_entry *entry = NULL;
uint64_t count = 0;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
entry = (struct fifo_entry *) fifo_pop(conf->request_queue);
ptr = conf->sub_B + dsize * (count % IO_QUEUE_SZ);
memcpyFromMmap(conf, (char *) ptr, (char *) conf->res->buf, dsize * sizeof(double));
count++;
entry->sub_B = ptr;
fifo_push(conf->io_queue, entry);
}
}
}
return NULL;
}
void *request_thread(void *args) {
struct request_conf *conf = (struct request_conf *) args;
uint64_t n, m, k;
struct fifo_entry *entry = NULL;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
entry = (struct fifo_entry *) fifo_pop(conf->complete_queue);
// TODO: need to add one more param
sock_write_request(conf->res->req_sock, conf->id, n, m, SUB_M, 4, k);
sock_read_data(conf->res->req_sock);
fifo_push(conf->request_queue, entry);
}
}
}
sock_write_request(conf->res->req_sock, -1, n, m, k, 1, 0);
sock_read_data(conf->res->req_sock);
return NULL;
}
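/* Note: request_thread and fetch_thread form a producer/consumer pipeline around the
   three fifos. request_thread recycles an entry from complete_queue, asks the remote
   side for block (n,m,k) and pushes the entry onto request_queue; fetch_thread pops it,
   copies the block into one of IO_QUEUE_SZ rotating staging slots
   (sub_B + dsize * (count % IO_QUEUE_SZ)) and pushes it onto io_queue; the verifier in
   nds_tensor_verify pops io_queue and returns the entry to complete_queue. With
   IO_QUEUE_SZ = 2 this is plain double buffering: one block is verified while the next
   is fetched. */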
void generate_data(double *array, uint64_t size) {
uint64_t i;
for (i = 0; i < size; i++) {
// array[i] = (((double) rand())/RAND_MAX - 0.5)*100;
array[i] = i;
}
}
uint64_t verify(const double *answer_C, const double *C, uint64_t size) {
uint64_t i, error = 0;
for (i = 0; i < size; i++) {
if (answer_C[i] != C[i]) {
printf("index %lu error, answer_C: %f, C: %f\n", i, answer_C[i], C[i]);
error++;
return error;
}
}
return error;
}
void reassemble_block_tensor_from_seq(const double *seq_matrix, double *result_matrix, size_t n, size_t sub_n, size_t i, size_t j, size_t k) {
size_t ii, jj, kk, a, b, c;
for (ii = i, a = 0; ii < i+sub_n; ii++, a++) {
for (jj = j, b = 0; jj < j+sub_n; jj++, b++) {
// printf("i=%lu, j=%lu, k=%lu, result_offset=%lu, seq_offset=%lu\n", ii, jj, k, b * sub_n + a * sub_n * sub_n, (ii * n * n * sizeof(double) + jj * n * sizeof(double) + k * sizeof(double)));
for (kk = k, c = 0; kk < k+sub_n; kk++, c++) {
result_matrix[c + b * sub_n + a * sub_n * sub_n] = seq_matrix[ii * n * n + jj * n + kk];
}
}
}
}
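/* Worked example (for clarity, not part of the original): with n = 4, sub_n = 2 and
   (i,j,k) = (2,0,2) the function gathers the 2x2x2 block whose corner is (2,0,2) in the
   row-major 4x4x4 tensor, e.g.
       result_matrix[0] = seq_matrix[2*16 + 0*4 + 2];   // (ii,jj,kk) = (2,0,2)
       result_matrix[1] = seq_matrix[2*16 + 0*4 + 3];   // (2,0,3)
       result_matrix[2] = seq_matrix[2*16 + 1*4 + 2];   // (2,1,2)
   i.e. source index ii*n*n + jj*n + kk maps to block index a*sub_n*sub_n + b*sub_n + c. */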
int nds_tensor_verify(struct resources *res, uint64_t id, uint64_t size, uint64_t sub_size, const double *verify_matrix) {
double *sub_B, *verify_sub_B;
size_t i, n, m, k, nn, mm, kk, a, b, c;
size_t error = 0;
size_t total_iteration;
struct fifo *request_queue;
struct fifo *io_queue;
struct fifo *complete_queue;
struct fifo_entry *entries = (struct fifo_entry *) calloc(IO_QUEUE_SZ, sizeof(struct fifo_entry));
struct fifo_entry *entry = NULL;
pthread_t f_thread_id;
struct fetch_conf f_conf;
pthread_t r_thread_id;
struct request_conf r_conf;
struct timeval h_start, h_end;
long duration;
// initialization
total_iteration = (M / SUB_M) * (M / SUB_M) * (M / SUB_M);
// it causes a problem if size == 1
request_queue = fifo_new(IO_QUEUE_SZ * 2);
if (request_queue == NULL) {
printf("cannot create request_queue\n");
return -1;
}
io_queue = fifo_new(IO_QUEUE_SZ * 2);
if (io_queue == NULL) {
printf("cannot create io_queue\n");
return -1;
}
complete_queue = fifo_new(IO_QUEUE_SZ * 2);
if (complete_queue == NULL) {
printf("cannot create complete_queue\n");
return -1;
}
for (i = 0; i < IO_QUEUE_SZ; i++) {
fifo_push(complete_queue, entries + i);
}
sub_B = (double *) malloc(SUB_M * SUB_M * SUB_M * sizeof(double) * IO_QUEUE_SZ);
verify_sub_B = (double *) malloc(SUB_M * SUB_M * SUB_M * sizeof(double));
r_conf.res = res;
r_conf.id = id;
r_conf.m = M;
r_conf.sub_m = SUB_M;
r_conf.request_queue = request_queue;
r_conf.complete_queue = complete_queue;
// create thread here
f_conf.res = res;
f_conf.m = M;
f_conf.sub_m = SUB_M;
f_conf.io_queue = io_queue;
f_conf.request_queue = request_queue;
f_conf.sub_B = sub_B;
pthread_create(&r_thread_id, NULL, request_thread, &r_conf);
pthread_create(&f_thread_id, NULL, fetch_thread, &f_conf);
gettimeofday(&h_start, NULL);
// blockGEMM
for (n = 0; n < N; n+=SUB_M) {
for (m = 0; m < M; m+=SUB_M) {
for (k = 0; k < K; k+=SUB_M) {
// memcpy?
entry = (struct fifo_entry *) fifo_pop(io_queue);
reassemble_block_tensor_from_seq(verify_matrix, verify_sub_B, M, SUB_M, n, m, k);
error += verify(verify_sub_B, entry->sub_B, SUB_M * SUB_M * SUB_M);
fifo_push(complete_queue, entry);
}
}
}
pthread_join(r_thread_id, NULL);
pthread_join(f_thread_id, NULL);
gettimeofday(&h_end, NULL);
duration = ((h_end.tv_sec - h_start.tv_sec) * 1000000) + (h_end.tv_usec - h_start.tv_usec);
printf("Verify duration: %f ms\n", (float) duration / 1000);
printf("error: %lu\n", error);
if (error == 0) {
printf("TEST PASSED!\n");
}
free(sub_B);
free(verify_sub_B);
fifo_free(io_queue);
fifo_free(request_queue);
free(entries);
return 0;
}
/******************************************************************************
* Function: print_config
*
* Input
* none
*
* Output
* none
*
* Returns
* none
*
* Description
* Print out config information
******************************************************************************/
void print_config(struct config_t config) {
fprintf(stdout, " ------------------------------------------------\n");
fprintf(stdout, " Device name : \"%s\"\n", config.dev_name);
fprintf(stdout, " IB port : %u\n", config.ib_port);
if (config.server_name)
fprintf(stdout, " IP : %s\n", config.server_name);
fprintf(stdout, " TCP port : %u\n", config.tcp_port);
if (config.gid_idx >= 0)
fprintf(stdout, " GID index : %u\n", config.gid_idx);
fprintf(stdout, " ------------------------------------------------\n\n");
}
int main(int argc, char *argv[]) {
int rc = 0;
uint64_t matrix_id, n, sub_n;
int verify_fd, hugepage_fd;
char *hugepage_addr;
double *verify_matrix;
// RDMA
struct resources res;
struct config_t config = {
"mlx4_0", /* dev_name */
"127.0.0.1", /* server_name */
19875, /* tcp_port */
1, /* ib_port */
0 /* gid_idx */
};
// default the iteration is 4 times
if (argc < 4) {
printf("usage: %s <matrix_id> <verify_matrix_path> <port>\n", argv[0]);
exit(1);
}
matrix_id = (uint64_t) atoll(argv[1]);
config.tcp_port = atoi(argv[3]);
/* print the used parameters for info*/
print_config(config);
printf("mapping hugepage\n");
hugepage_fd = open("/dev/hugepages/tensorstore", O_RDWR, 0755);
if (hugepage_fd < 0) {
perror("open");
exit(1);
}
hugepage_addr = (char *) mmap(0, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, hugepage_fd, 0);
if (hugepage_addr==MAP_FAILED) {
perror("mmap");
exit(1);
}
res.buf = hugepage_addr;
memset(hugepage_addr, 0, BUF_SIZE);
printf("hugepage starting address is: %p\n", hugepage_addr);
printf("socket connection\n");
rc = make_two_tcp_connection(&res, &config);
if (rc < 0) {
perror("sock connect");
exit(1);
}
verify_fd = open(argv[2], O_RDONLY);
if (verify_fd < 0) {
perror("open");
exit(1);
}
verify_matrix = (double *) mmap(0, M * N * K * sizeof(double), PROT_READ, MAP_PRIVATE, verify_fd, 0);
if (verify_matrix==MAP_FAILED) {
perror("mmap");
exit(1);
}
printf("calculating the result of pagerank\n");
rc = nds_tensor_verify(&res, matrix_id, M, SUB_M, verify_matrix);
close(res.sock);
close(res.req_sock);
munmap(hugepage_addr, BUF_SIZE);
close(hugepage_fd);
munmap(verify_matrix, M * N * K * sizeof(double));
close(verify_fd);
return rc;
}
| e0c90e8109fdd153e4de0c5149cf6fd4dcc90677.cu | extern "C" {
#include "rdma.h"
#include "fifo.h"
}
#include <fcntl.h>
#include <unistd.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#define M 1536UL
#define N M
#define K M
#define LDB1 M
#define LDB2 LDB1*N
#define LDC1 N
#define SUB_M 512UL
#define HUGEPAGE_SZ (4UL * 1024UL * 1024UL * 1024UL)
#define AGGREGATED_SZ (SUB_M * SUB_M * SUB_M * 8UL)
// #define IO_QUEUE_SZ (HUGEPAGE_SZ / AGGREGATED_SZ / 2UL)
#define IO_QUEUE_SZ 2UL
#define NITERS 4UL
void print_config(struct config_t config);
struct fetch_conf {
struct resources *res;
uint64_t m, sub_m;
double *sub_B;
char *hugepage_addr;
struct fifo *request_queue;
struct fifo *io_queue;
};
struct request_conf {
struct resources *res;
struct fifo *complete_queue;
struct fifo *request_queue;
uint64_t id;
uint64_t m, sub_m;
};
struct fifo_entry {
double *sub_B;
};
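/* Read one block announcement from the data socket, copy the block out of the
 * RDMA-backed hugepage buffer at the offset the server reported, then send a
 * dummy byte back to acknowledge the transfer. */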
int memcpyFromMmap(struct fetch_conf *conf, char *dst, const char *src, const size_t length) {
struct response *res = NULL;
res = sock_read_offset(conf->res->sock);
if (res == NULL) {
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
// if (res->id == 0) {
// printf("fetching row [%lu:%lu]\n", res->x, res->y);
// } else {
// printf("fetching col [%lu:%lu]\n", res->x, res->y);
// }
memcpy(dst, src + res->offset, length);
free(res);
if (sock_write_data(conf->res->sock)) { /* just send a dummy char back and forth */
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
return 0;
}
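/* Fetch thread: for every (n, m, k) sub-block, wait for a free entry from the
 * request queue, copy the block from the hugepage buffer into one of the
 * IO_QUEUE_SZ staging buffers, and pass the filled entry to the verifier
 * through io_queue. */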
void *fetch_thread(void *args) {
struct fetch_conf *conf = (struct fetch_conf *) args;
uint64_t n, m, k;
uint64_t dsize = SUB_M * SUB_M * SUB_M;
double *ptr;
struct fifo_entry *entry = NULL;
uint64_t count = 0;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
entry = (struct fifo_entry *) fifo_pop(conf->request_queue);
ptr = conf->sub_B + dsize * (count % IO_QUEUE_SZ);
memcpyFromMmap(conf, (char *) ptr, (char *) conf->res->buf, dsize * sizeof(double));
count++;
entry->sub_B = ptr;
fifo_push(conf->io_queue, entry);
}
}
}
return NULL;
}
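/* Request thread: recycle completed entries and ask the remote store for each
 * (n, m, k) sub-block over the request socket; the final request with id -1
 * tells the server this client is done. */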
void *request_thread(void *args) {
struct request_conf *conf = (struct request_conf *) args;
uint64_t n, m, k;
struct fifo_entry *entry = NULL;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
entry = (struct fifo_entry *) fifo_pop(conf->complete_queue);
// TODO: need to add one more param
sock_write_request(conf->res->req_sock, conf->id, n, m, SUB_M, 4, k);
sock_read_data(conf->res->req_sock);
fifo_push(conf->request_queue, entry);
}
}
}
sock_write_request(conf->res->req_sock, -1, n, m, k, 1, 0);
sock_read_data(conf->res->req_sock);
return NULL;
}
void generate_data(double *array, uint64_t size) {
uint64_t i;
for (i = 0; i < size; i++) {
// array[i] = (((double) rand())/RAND_MAX - 0.5)*100;
array[i] = i;
}
}
uint64_t verify(const double *answer_C, const double *C, uint64_t size) {
uint64_t i, error = 0;
for (i = 0; i < size; i++) {
if (answer_C[i] != C[i]) {
printf("index %lu error, answer_C: %f, C: %f\n", i, answer_C[i], C[i]);
error++;
return error;
}
}
return error;
}
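/* Gather the sub_n x sub_n x sub_n block starting at (i, j, k) of the
 * row-major n x n x n reference tensor into a contiguous buffer so it can be
 * compared element-wise with the block fetched from the remote store. */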
void reassemble_block_tensor_from_seq(const double *seq_matrix, double *result_matrix, size_t n, size_t sub_n, size_t i, size_t j, size_t k) {
size_t ii, jj, kk, a, b, c;
for (ii = i, a = 0; ii < i+sub_n; ii++, a++) {
for (jj = j, b = 0; jj < j+sub_n; jj++, b++) {
// printf("i=%lu, j=%lu, k=%lu, result_offset=%lu, seq_offset=%lu\n", ii, jj, k, b * sub_n + a * sub_n * sub_n, (ii * n * n * sizeof(double) + jj * n * sizeof(double) + k * sizeof(double)));
for (kk = k, c = 0; kk < k+sub_n; kk++, c++) {
result_matrix[c + b * sub_n + a * sub_n * sub_n] = seq_matrix[ii * n * n + jj * n + kk];
}
}
}
}
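/* Stream every sub-block of matrix `id` through the request/fetch threads and
 * compare it against the locally reassembled reference block, accumulating and
 * reporting the number of mismatching elements. */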
int nds_tensor_verify(struct resources *res, uint64_t id, uint64_t size, uint64_t sub_size, const double *verify_matrix) {
double *sub_B, *verify_sub_B;
size_t i, n, m, k, nn, mm, kk, a, b, c;
size_t error = 0;
size_t total_iteration;
struct fifo *request_queue;
struct fifo *io_queue;
struct fifo *complete_queue;
struct fifo_entry *entries = (struct fifo_entry *) calloc(IO_QUEUE_SZ, sizeof(struct fifo_entry));
struct fifo_entry *entry = NULL;
pthread_t f_thread_id;
struct fetch_conf f_conf;
pthread_t r_thread_id;
struct request_conf r_conf;
struct timeval h_start, h_end;
long duration;
// initialization
total_iteration = (M / SUB_M) * (M / SUB_M) * (M / SUB_M);
    // the fifo causes problems if its size == 1
request_queue = fifo_new(IO_QUEUE_SZ * 2);
if (request_queue == NULL) {
printf("cannot create request_queue\n");
return -1;
}
io_queue = fifo_new(IO_QUEUE_SZ * 2);
if (io_queue == NULL) {
printf("cannot create io_queue\n");
return -1;
}
complete_queue = fifo_new(IO_QUEUE_SZ * 2);
if (complete_queue == NULL) {
printf("cannot create complete_queue\n");
return -1;
}
for (i = 0; i < IO_QUEUE_SZ; i++) {
fifo_push(complete_queue, entries + i);
}
sub_B = (double *) malloc(SUB_M * SUB_M * SUB_M * sizeof(double) * IO_QUEUE_SZ);
verify_sub_B = (double *) malloc(SUB_M * SUB_M * SUB_M * sizeof(double));
r_conf.res = res;
r_conf.id = id;
r_conf.m = M;
r_conf.sub_m = SUB_M;
r_conf.request_queue = request_queue;
r_conf.complete_queue = complete_queue;
// create thread here
f_conf.res = res;
f_conf.m = M;
f_conf.sub_m = SUB_M;
f_conf.io_queue = io_queue;
f_conf.request_queue = request_queue;
f_conf.sub_B = sub_B;
pthread_create(&r_thread_id, NULL, request_thread, &r_conf);
pthread_create(&f_thread_id, NULL, fetch_thread, &f_conf);
gettimeofday(&h_start, NULL);
// blockGEMM
for (n = 0; n < N; n+=SUB_M) {
for (m = 0; m < M; m+=SUB_M) {
for (k = 0; k < K; k+=SUB_M) {
// memcpy?
entry = (struct fifo_entry *) fifo_pop(io_queue);
reassemble_block_tensor_from_seq(verify_matrix, verify_sub_B, M, SUB_M, n, m, k);
error += verify(verify_sub_B, entry->sub_B, SUB_M * SUB_M * SUB_M);
fifo_push(complete_queue, entry);
}
}
}
pthread_join(r_thread_id, NULL);
pthread_join(f_thread_id, NULL);
gettimeofday(&h_end, NULL);
duration = ((h_end.tv_sec - h_start.tv_sec) * 1000000) + (h_end.tv_usec - h_start.tv_usec);
printf("Verify duration: %f ms\n", (float) duration / 1000);
printf("error: %lu\n", error);
if (error == 0) {
printf("TEST PASSED!\n");
}
free(sub_B);
free(verify_sub_B);
fifo_free(io_queue);
fifo_free(request_queue);
free(entries);
return 0;
}
/******************************************************************************
* Function: print_config
*
* Input
* none
*
* Output
* none
*
* Returns
* none
*
* Description
* Print out config information
******************************************************************************/
void print_config(struct config_t config) {
fprintf(stdout, " ------------------------------------------------\n");
fprintf(stdout, " Device name : \"%s\"\n", config.dev_name);
fprintf(stdout, " IB port : %u\n", config.ib_port);
if (config.server_name)
fprintf(stdout, " IP : %s\n", config.server_name);
fprintf(stdout, " TCP port : %u\n", config.tcp_port);
if (config.gid_idx >= 0)
fprintf(stdout, " GID index : %u\n", config.gid_idx);
fprintf(stdout, " ------------------------------------------------\n\n");
}
int main(int argc, char *argv[]) {
int rc = 0;
uint64_t matrix_id, n, sub_n;
int verify_fd, hugepage_fd;
char *hugepage_addr;
double *verify_matrix;
// RDMA
struct resources res;
struct config_t config = {
"mlx4_0", /* dev_name */
"127.0.0.1", /* server_name */
19875, /* tcp_port */
1, /* ib_port */
0 /* gid_idx */
};
// default the iteration is 4 times
if (argc < 4) {
printf("usage: %s <matrix_id> <verify_matrix_path> <port>\n", argv[0]);
exit(1);
}
matrix_id = (uint64_t) atoll(argv[1]);
config.tcp_port = atoi(argv[3]);
/* print the used parameters for info*/
print_config(config);
printf("mapping hugepage\n");
hugepage_fd = open("/dev/hugepages/tensorstore", O_RDWR, 0755);
if (hugepage_fd < 0) {
perror("open");
exit(1);
}
hugepage_addr = (char *) mmap(0, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, hugepage_fd, 0);
if (hugepage_addr==MAP_FAILED) {
perror("mmap");
exit(1);
}
res.buf = hugepage_addr;
memset(hugepage_addr, 0, BUF_SIZE);
printf("hugepage starting address is: %p\n", hugepage_addr);
printf("socket connection\n");
rc = make_two_tcp_connection(&res, &config);
if (rc < 0) {
perror("sock connect");
exit(1);
}
verify_fd = open(argv[2], O_RDONLY);
if (verify_fd < 0) {
perror("open");
exit(1);
}
verify_matrix = (double *) mmap(0, M * N * K * sizeof(double), PROT_READ, MAP_PRIVATE, verify_fd, 0);
if (verify_matrix==MAP_FAILED) {
perror("mmap");
exit(1);
}
printf("calculating the result of pagerank\n");
rc = nds_tensor_verify(&res, matrix_id, M, SUB_M, verify_matrix);
close(res.sock);
close(res.req_sock);
munmap(hugepage_addr, BUF_SIZE);
close(hugepage_fd);
munmap(verify_matrix, M * N * K * sizeof(double));
close(verify_fd);
return rc;
}
|
ac695a539e5a701c499f7cd54710b45ea6cf3fcf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Compile using following structure
* nvcc -rdc=true -arch compute_35 matrix_determinant_minors.cu -o min
* and profile with nvprof --unified-memory-profiling off ./min
*/
#include <hip/hip_runtime.h>
#include "book.h"
#include <hip/hip_runtime.h>
// Thread block size
#define N 3
typedef struct {
int width;
int height;
double* elements;
} Matrix;
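// Recursive Laplace (minor) expansion using dynamic parallelism (hence the
// -rdc=true flag above): each thread strips row 0 and its own column to build
// an (n-1)x(n-1) minor, then relaunches the kernel on it. Note that the
// per-launch hipMalloc of aux_matrix is never freed (the hipFree below is
// commented out) and `determinant` is never written by this kernel as it stands.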
__global__ void determinant_by_minors(Matrix matrix, double *determinant, int n, int factorial, int depth) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int factor = 1;
int j = 0;
int new_dimension = n - 1;
double prueba = 1;
Matrix aux_matrix;
aux_matrix.width = new_dimension;
aux_matrix.height = new_dimension;
hipMalloc(&aux_matrix.elements, sizeof(double) * aux_matrix.width * aux_matrix.height);
//for(int i = 0; i < matrix.height; i++){
// printf("|# ");
// for(int j = 0; j < matrix.width; j++) {
// printf("%.0f | ", matrix.elements[j + i * matrix.width]);
// }
// printf("**\n");
//}
if(n == 1) {
//printf("\nScalar determinant %1.0f. index %d and compound index %d\n", matrix.elements[0], father, (index + father));
prueba *= matrix.elements[0];
} else {
//printf("\nCalculate the minor for position %d with len %d\n", index, new_dimension);
//printf("\nMinor Matrix for position %d with len %d\n", index, new_dimension);
for(int i = n; i < n * n; i++) {
if (i == index + n * factor) {
factor++;
continue;
} else {
aux_matrix.elements[j] = matrix.elements[i];
//printf("\nFor minor %d extract the element %1.0f\n", index, matrix.elements[i]);
prueba *= matrix.elements[i];
j++;
}
}
//printf("Prueba: %1.0f \n", prueba);
        hipLaunchKernelGGL(( determinant_by_minors), dim3(1), dim3(n - 1), 0, 0, aux_matrix, determinant, n - 1, factorial, depth + 1);
}
//printf("\nMatrix element %1.0f. and index %d\n", matrix.elements[index], index);
//hipFree(aux_matrix.elements);
}
int main( void ) {
Matrix host_matrix;
host_matrix.width = N;
host_matrix.height = N;
double *host_determinant;
Matrix device_matrix;
device_matrix.width = host_matrix.width;
device_matrix.height = host_matrix.height;
double *device_determinant;
int factorial = 1;
for(int i = 1; i <= N; i++) {
factorial *= i;
}
// Allocate host memory
host_matrix.elements = (double*)malloc(sizeof(double) * host_matrix.width * host_matrix.height);
host_determinant = (double*)malloc(sizeof(double) * factorial * N);
for(int i = 0;i < factorial * N; i++) {
host_determinant[i] = 1000000;
}
// Initialize host Matrix
for(int i = 0; i < host_matrix.width * host_matrix.height; i++){
host_matrix.elements[i] = rand() % 100 + 1;
}
printf("\nOriginal Matrix:\n");
for(int i = 0; i < host_matrix.height; i++){
printf("| ");
for(int j = 0; j < host_matrix.width; j++) {
printf("%.0f | ", host_matrix.elements[j + i * host_matrix.width]);
}
printf("\n");
}
// Allocate device memory
HANDLE_ERROR( hipMalloc(&device_matrix.elements, sizeof(double) * device_matrix.width * device_matrix.height) );
HANDLE_ERROR( hipMalloc(&device_determinant, sizeof(double) * factorial * N) );
// Transfer data from host to device memory
HANDLE_ERROR( hipMemcpy(device_determinant, host_determinant, sizeof(double) * factorial * N, hipMemcpyHostToDevice) );
HANDLE_ERROR( hipMemcpy(device_matrix.elements, host_matrix.elements, sizeof(double) * host_matrix.width * host_matrix.height, hipMemcpyHostToDevice) );
// capture the start time
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
// Executing kernel
hipLaunchKernelGGL(( determinant_by_minors), dim3(1), dim3(N), 0, 0, device_matrix, device_determinant, N, factorial, 0);
// get stop time, and display the timing results
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf( "\nTime taken: %3.10f ms\n", elapsedTime );
// Transfer data from device to host memory
HANDLE_ERROR( hipMemcpy(host_determinant, device_determinant, sizeof(double) * factorial * N, hipMemcpyDeviceToHost) );
// Free resources
HANDLE_ERROR( hipFree(device_matrix.elements) );
HANDLE_ERROR( hipFree(device_determinant) );
for(int i = 0; i < factorial * N; i++) {
printf("\n Determinant: %1.0f\n", host_determinant[i]);
}
} | ac695a539e5a701c499f7cd54710b45ea6cf3fcf.cu | /*
* Compile using following structure
* nvcc -rdc=true -arch compute_35 matrix_determinant_minors.cu -o min
* and profile with nvprof --unified-memory-profiling off ./min
*/
#include <cuda.h>
#include "book.h"
#include <cuda_runtime.h>
// Thread block size
#define N 3
typedef struct {
int width;
int height;
double* elements;
} Matrix;
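// Recursive Laplace (minor) expansion using dynamic parallelism (hence the
// -rdc=true flag above): each thread strips row 0 and its own column to build
// an (n-1)x(n-1) minor, then relaunches the kernel on it. Note that the
// per-launch cudaMalloc of aux_matrix is never freed (the cudaFree below is
// commented out) and `determinant` is never written by this kernel as it stands.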
__global__ void determinant_by_minors(Matrix matrix, double *determinant, int n, int factorial, int depth) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int factor = 1;
int j = 0;
int new_dimension = n - 1;
double prueba = 1;
Matrix aux_matrix;
aux_matrix.width = new_dimension;
aux_matrix.height = new_dimension;
cudaMalloc(&aux_matrix.elements, sizeof(double) * aux_matrix.width * aux_matrix.height);
//for(int i = 0; i < matrix.height; i++){
// printf("|# ");
// for(int j = 0; j < matrix.width; j++) {
// printf("%.0f | ", matrix.elements[j + i * matrix.width]);
// }
// printf("**\n");
//}
if(n == 1) {
//printf("\nScalar determinant %1.0f. index %d and compound index %d\n", matrix.elements[0], father, (index + father));
prueba *= matrix.elements[0];
} else {
//printf("\nCalculate the minor for position %d with len %d\n", index, new_dimension);
//printf("\nMinor Matrix for position %d with len %d\n", index, new_dimension);
for(int i = n; i < n * n; i++) {
if (i == index + n * factor) {
factor++;
continue;
} else {
aux_matrix.elements[j] = matrix.elements[i];
//printf("\nFor minor %d extract the element %1.0f\n", index, matrix.elements[i]);
prueba *= matrix.elements[i];
j++;
}
}
//printf("Prueba: %1.0f \n", prueba);
determinant_by_minors<<<1, n - 1>>>(aux_matrix, determinant, n - 1, factorial, father + 1);
}
//printf("\nMatrix element %1.0f. and index %d\n", matrix.elements[index], index);
//cudaFree(aux_matrix.elements);
}
int main( void ) {
Matrix host_matrix;
host_matrix.width = N;
host_matrix.height = N;
double *host_determinant;
Matrix device_matrix;
device_matrix.width = host_matrix.width;
device_matrix.height = host_matrix.height;
double *device_determinant;
int factorial = 1;
for(int i = 1; i <= N; i++) {
factorial *= i;
}
// Allocate host memory
host_matrix.elements = (double*)malloc(sizeof(double) * host_matrix.width * host_matrix.height);
host_determinant = (double*)malloc(sizeof(double) * factorial * N);
for(int i = 0;i < factorial * N; i++) {
host_determinant[i] = 1000000;
}
// Initialize host Matrix
for(int i = 0; i < host_matrix.width * host_matrix.height; i++){
host_matrix.elements[i] = rand() % 100 + 1;
}
printf("\nOriginal Matrix:\n");
for(int i = 0; i < host_matrix.height; i++){
printf("| ");
for(int j = 0; j < host_matrix.width; j++) {
printf("%.0f | ", host_matrix.elements[j + i * host_matrix.width]);
}
printf("\n");
}
// Allocate device memory
HANDLE_ERROR( cudaMalloc(&device_matrix.elements, sizeof(double) * device_matrix.width * device_matrix.height) );
HANDLE_ERROR( cudaMalloc(&device_determinant, sizeof(double) * factorial * N) );
// Transfer data from host to device memory
HANDLE_ERROR( cudaMemcpy(device_determinant, host_determinant, sizeof(double) * factorial * N, cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(device_matrix.elements, host_matrix.elements, sizeof(double) * host_matrix.width * host_matrix.height, cudaMemcpyHostToDevice) );
// capture the start time
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
// Executing kernel
determinant_by_minors<<<1, N>>>(device_matrix, device_determinant, N, factorial, 0);
// get stop time, and display the timing results
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf( "\nTime taken: %3.10f ms\n", elapsedTime );
// Transfer data from device to host memory
HANDLE_ERROR( cudaMemcpy(host_determinant, device_determinant, sizeof(double) * factorial * N, cudaMemcpyDeviceToHost) );
// Free resources
HANDLE_ERROR( cudaFree(device_matrix.elements) );
HANDLE_ERROR( cudaFree(device_determinant) );
for(int i = 0; i < factorial * N; i++) {
printf("\n Determinant: %1.0f\n", host_determinant[i]);
}
} |
6fcd72f3d6038e9d2665dd3f159067eb72002b1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <chrono>
#include <roctracer/roctx.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_WIDTH 32
/* NOTE: A and C are column major, B is row major
*/
__global__ void mygemm(float *__restrict__ c, //<! [out] and MxN matrix
const float *a, //<! [in] an MxK matrix
const float *b, //<! [in] an KxN matrix
const int M, const int N, const int K) {
__shared__ float aSh[TILE_WIDTH][TILE_WIDTH];
__shared__ float bSh[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = by * TILE_WIDTH + ty;
int j = bx * TILE_WIDTH + tx;
float acc = 0;
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
for (int m = 0; m < (K - 1) / TILE_WIDTH + 1; ++m) {
if (i < M && m * TILE_WIDTH + tx < K) {
aSh[ty][tx] = A(i, m * TILE_WIDTH + tx);
} else {
aSh[ty][tx] = 0;
}
if (j < N && m * TILE_WIDTH + ty < K) {
bSh[ty][tx] = B(m * TILE_WIDTH + ty, j);
} else {
bSh[ty][tx] = 0;
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
acc += aSh[ty][k] * bSh[k][tx];
}
__syncthreads();
}
if (i < M && j < N) {
C(i, j) = acc;
}
#undef A
#undef B
#undef C
}
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1600 x 1500
// B: 1500 x 1400
// C: 1600 x 1400
int m = 1600;
int n = 1400;
int k = 1500;
int nIters = 5;
int nWarmup = 5;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
// 4 muls of m/2, n/2, k
const int64_t flop = int64_t(m) / 2 * int64_t(n) / 2 * int64_t(k) * 2 * 4 * nIters;
// initialize host data
std::cout << "generate data\n";
roctxRangePush("generate data");
float *aHost[2], *bHost[2], *cHost[2][2];
CUDA_RUNTIME(hipHostMalloc(&aHost[0], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&aHost[1], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&bHost[0], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&bHost[1], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[0][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[0][1], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[1][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[1][1], m / 2 * n / 2 * sizeof(float), 0));
std::generate(aHost[0], aHost[0] + m / 2 * k, random_int);
std::generate(aHost[1], aHost[1] + m / 2 * k, random_int);
std::generate(bHost[0], bHost[0] + k * n / 2, random_int);
std::generate(bHost[1], bHost[1] + k * n / 2, random_int);
roctxRangePop();
// allocate device data
std::cout << "allocate data\n";
float *aDev[2], *bDev[2], *cDev[2][2];
CUDA_RUNTIME(hipMalloc(&aDev[0], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&aDev[1], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&bDev[0], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&bDev[1], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[0][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[0][1], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[1][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[1][1], m / 2 * n / 2 * sizeof(float)));
// create streams for copy and kernels
hipStream_t copyStream, kernelStream;
CUDA_RUNTIME(hipStreamCreate(©Stream));
CUDA_RUNTIME(hipStreamCreate(&kernelStream));
hipEvent_t waitForA0B0, waitForA1, waitForB1, waitC[2][2];
CUDA_RUNTIME(hipEventCreate(&waitForA0B0));
CUDA_RUNTIME(hipEventCreate(&waitForA1));
CUDA_RUNTIME(hipEventCreate(&waitForB1));
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
CUDA_RUNTIME(hipEventCreate(&waitC[i][j]));
}
}
// GPU kernel launch parameters
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid;
dimGrid.x = (n/2 + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (m/2 + dimBlock.y - 1) / dimBlock.y;
float kernelTime = 0;
float wallTime = 0;
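  // Each iteration pipelines the four quarter-GEMMs: host-to-device copies are
  // issued on copyStream, kernels on kernelStream, and events make each kernel
  // wait only for the blocks it reads, so later transfers and the device-to-host
  // copies of finished tiles overlap with the remaining compute.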
for (int iter = 0; iter < nIters + nWarmup; ++iter) {
roctxRangePush("wall time");
auto wallStart = Clock::now();
// copy a0 and b0
CUDA_RUNTIME(hipMemcpyAsync(aDev[0], aHost[0], m / 2 * k * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipMemcpyAsync(bDev[0], bHost[0], k * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipEventRecord(waitForA0B0, copyStream));
// have the kernelStream wait for the transfers to complete
CUDA_RUNTIME(hipStreamWaitEvent(kernelStream, waitForA0B0, 0));
// launch c[0][0] = a[0] * b[0]
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[0][0], aDev[0], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[0][0], kernelStream));
// copy a1
CUDA_RUNTIME(hipMemcpyAsync(aDev[1], aHost[1], m / 2 * k * sizeof(float),
hipMemcpyDefault, copyStream));
    CUDA_RUNTIME(hipEventRecord(waitForA1, copyStream)); // record on the copy stream so the wait below actually covers the a[1] transfer
// launch c[1][0] = a[1] * b[0] after a[1] is on the GPU
CUDA_RUNTIME(hipStreamWaitEvent(kernelStream, waitForA1, 0));
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[1][0], aDev[1], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[1][0], kernelStream));
// copy b1
CUDA_RUNTIME(hipMemcpyAsync(bDev[1], bHost[1], k * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
    CUDA_RUNTIME(hipEventRecord(waitForB1, copyStream)); // record on the copy stream so the wait below actually covers the b[1] transfer
// launch c[0][1] = a[0] * b[1] after B1 is on the GPU
CUDA_RUNTIME(hipStreamWaitEvent(kernelStream, waitForB1, 0));
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[0][1], aDev[0], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[0][1], kernelStream));
// launch c[1][1] = a[1] * b[1]
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[1][1], aDev[1], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[1][1], kernelStream));
// copy c back to CPU as kernels finish
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[0][0], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[0][0], cDev[0][0],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[1][0], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[1][0], cDev[1][0],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[0][1], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[0][1], cDev[0][1],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[1][1], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[1][1], cDev[1][1],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipDeviceSynchronize());
roctxRangePop(); // wall time
Duration wallElapsed = Clock::now() - wallStart;
// kernel time
float kernelElapsed;
CUDA_RUNTIME(hipEventSynchronize(waitC[1][1]));
CUDA_RUNTIME(hipEventElapsedTime(&kernelElapsed, waitForA0B0, waitC[1][1]));
kernelElapsed /= 1000; // seconds
std::cout << iter << " kernel=" << kernelElapsed
<< " wall=" << wallElapsed.count()
<< (iter >= nWarmup ? " *" : " ") << "\n";
if (iter >= nWarmup) {
wallTime += wallElapsed.count();
kernelTime += kernelElapsed;
}
}
// print results
double kernelGflops = flop / 1e9 / kernelTime;
std::cout << "kernel " << kernelGflops << "GFLOPS (" << flop << " flop, "
<< kernelTime << "s)\n";
double wallGflops = flop / 1e9 / wallTime;
std::cout << "wall " << wallGflops << "GFLOPS (" << flop << " flop, "
<< wallTime << "s)\n";
// release resources
CUDA_RUNTIME(hipFree(aDev[0]));
CUDA_RUNTIME(hipFree(aDev[1]));
CUDA_RUNTIME(hipFree(bDev[0]));
CUDA_RUNTIME(hipFree(bDev[1]));
return 0;
}
| 6fcd72f3d6038e9d2665dd3f159067eb72002b1b.cu | #include <algorithm>
#include <chrono>
#include <nvToolsExt.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_WIDTH 32
/* NOTE: A and C are column major, B is row major
*/
__global__ void mygemm(float *__restrict__ c, //<! [out] and MxN matrix
const float *a, //<! [in] an MxK matrix
const float *b, //<! [in] an KxN matrix
const int M, const int N, const int K) {
__shared__ float aSh[TILE_WIDTH][TILE_WIDTH];
__shared__ float bSh[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = by * TILE_WIDTH + ty;
int j = bx * TILE_WIDTH + tx;
float acc = 0;
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
for (int m = 0; m < (K - 1) / TILE_WIDTH + 1; ++m) {
if (i < M && m * TILE_WIDTH + tx < K) {
aSh[ty][tx] = A(i, m * TILE_WIDTH + tx);
} else {
aSh[ty][tx] = 0;
}
if (j < N && m * TILE_WIDTH + ty < K) {
bSh[ty][tx] = B(m * TILE_WIDTH + ty, j);
} else {
bSh[ty][tx] = 0;
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
acc += aSh[ty][k] * bSh[k][tx];
}
__syncthreads();
}
if (i < M && j < N) {
C(i, j) = acc;
}
#undef A
#undef B
#undef C
}
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1600 x 1500
// B: 1500 x 1400
// C: 1600 x 1400
int m = 1600;
int n = 1400;
int k = 1500;
int nIters = 5;
int nWarmup = 5;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
// 4 muls of m/2, n/2, k
const int64_t flop = int64_t(m) / 2 * int64_t(n) / 2 * int64_t(k) * 2 * 4 * nIters;
// initialize host data
std::cout << "generate data\n";
nvtxRangePush("generate data");
float *aHost[2], *bHost[2], *cHost[2][2];
CUDA_RUNTIME(cudaHostAlloc(&aHost[0], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&aHost[1], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&bHost[0], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&bHost[1], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[0][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[0][1], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[1][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[1][1], m / 2 * n / 2 * sizeof(float), 0));
std::generate(aHost[0], aHost[0] + m / 2 * k, random_int);
std::generate(aHost[1], aHost[1] + m / 2 * k, random_int);
std::generate(bHost[0], bHost[0] + k * n / 2, random_int);
std::generate(bHost[1], bHost[1] + k * n / 2, random_int);
nvtxRangePop();
// allocate device data
std::cout << "allocate data\n";
float *aDev[2], *bDev[2], *cDev[2][2];
CUDA_RUNTIME(cudaMalloc(&aDev[0], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&aDev[1], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&bDev[0], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&bDev[1], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[0][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[0][1], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[1][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[1][1], m / 2 * n / 2 * sizeof(float)));
// create streams for copy and kernels
cudaStream_t copyStream, kernelStream;
CUDA_RUNTIME(cudaStreamCreate(©Stream));
CUDA_RUNTIME(cudaStreamCreate(&kernelStream));
cudaEvent_t waitForA0B0, waitForA1, waitForB1, waitC[2][2];
CUDA_RUNTIME(cudaEventCreate(&waitForA0B0));
CUDA_RUNTIME(cudaEventCreate(&waitForA1));
CUDA_RUNTIME(cudaEventCreate(&waitForB1));
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
CUDA_RUNTIME(cudaEventCreate(&waitC[i][j]));
}
}
// GPU kernel launch parameters
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid;
dimGrid.x = (n/2 + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (m/2 + dimBlock.y - 1) / dimBlock.y;
float kernelTime = 0;
float wallTime = 0;
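  // Each iteration pipelines the four quarter-GEMMs: host-to-device copies are
  // issued on copyStream, kernels on kernelStream, and events make each kernel
  // wait only for the blocks it reads, so later transfers and the device-to-host
  // copies of finished tiles overlap with the remaining compute.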
for (int iter = 0; iter < nIters + nWarmup; ++iter) {
nvtxRangePush("wall time");
auto wallStart = Clock::now();
// copy a0 and b0
CUDA_RUNTIME(cudaMemcpyAsync(aDev[0], aHost[0], m / 2 * k * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaMemcpyAsync(bDev[0], bHost[0], k * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaEventRecord(waitForA0B0, copyStream));
// have the kernelStream wait for the transfers to complete
CUDA_RUNTIME(cudaStreamWaitEvent(kernelStream, waitForA0B0, 0));
// launch c[0][0] = a[0] * b[0]
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[0][0], aDev[0], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[0][0], kernelStream));
// copy a1
CUDA_RUNTIME(cudaMemcpyAsync(aDev[1], aHost[1], m / 2 * k * sizeof(float),
cudaMemcpyDefault, copyStream));
    CUDA_RUNTIME(cudaEventRecord(waitForA1, copyStream)); // record on the copy stream so the wait below actually covers the a[1] transfer
// launch c[1][0] = a[1] * b[0] after a[1] is on the GPU
CUDA_RUNTIME(cudaStreamWaitEvent(kernelStream, waitForA1, 0));
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[1][0], aDev[1], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[1][0], kernelStream));
// copy b1
CUDA_RUNTIME(cudaMemcpyAsync(bDev[1], bHost[1], k * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
    CUDA_RUNTIME(cudaEventRecord(waitForB1, copyStream)); // record on the copy stream so the wait below actually covers the b[1] transfer
// launch c[0][1] = a[0] * b[1] after B1 is on the GPU
CUDA_RUNTIME(cudaStreamWaitEvent(kernelStream, waitForB1, 0));
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[0][1], aDev[0], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[0][1], kernelStream));
// launch c[1][1] = a[1] * b[1]
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[1][1], aDev[1], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[1][1], kernelStream));
// copy c back to CPU as kernels finish
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[0][0], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[0][0], cDev[0][0],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[1][0], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[1][0], cDev[1][0],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[0][1], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[0][1], cDev[0][1],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[1][1], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[1][1], cDev[1][1],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaDeviceSynchronize());
nvtxRangePop(); // wall time
Duration wallElapsed = Clock::now() - wallStart;
// kernel time
float kernelElapsed;
CUDA_RUNTIME(cudaEventSynchronize(waitC[1][1]));
CUDA_RUNTIME(cudaEventElapsedTime(&kernelElapsed, waitForA0B0, waitC[1][1]));
kernelElapsed /= 1000; // seconds
std::cout << iter << " kernel=" << kernelElapsed
<< " wall=" << wallElapsed.count()
<< (iter >= nWarmup ? " *" : " ") << "\n";
if (iter >= nWarmup) {
wallTime += wallElapsed.count();
kernelTime += kernelElapsed;
}
}
// print results
double kernelGflops = flop / 1e9 / kernelTime;
std::cout << "kernel " << kernelGflops << "GFLOPS (" << flop << " flop, "
<< kernelTime << "s)\n";
double wallGflops = flop / 1e9 / wallTime;
std::cout << "wall " << wallGflops << "GFLOPS (" << flop << " flop, "
<< wallTime << "s)\n";
// release resources
CUDA_RUNTIME(cudaFree(aDev[0]));
CUDA_RUNTIME(cudaFree(aDev[1]));
CUDA_RUNTIME(cudaFree(bDev[0]));
CUDA_RUNTIME(cudaFree(bDev[1]));
return 0;
}
|
385e77012210a6dec229a13e330ae30515bdc907.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ldcp_decoder.h
* ldpc3
*
* Created by legal on 02/04/11.
* Copyright 2011 ENSEIRB. All rights reserved.
*
*/
/*----------------------------------------------------------------------------*/
#include "CGPU_Decoder_MS_SIMD.h"
#include "../transpose/GPU_Transpose.h"
#include "../transpose/GPU_Transpose_uint8.h"
#include "../tools/debug_fx.h"
#include "hip/hip_runtime_api.h"
#include "hip/hip_runtime.h"
static const size_t BLOCK_SIZE = 128; // 96 for exp.
CGPU_Decoder_MS_SIMD::CGPU_Decoder_MS_SIMD(size_t _nb_frames, size_t n, size_t k, size_t m):
CGPUDecoder(_nb_frames, n, k, m)
{
size_t nb_blocks = nb_frames / BLOCK_SIZE;
printf("(II) Decoder configuration: BLOCK_SIZE = %ld, nb_frames = %ld, nb_blocks = %ld\n", BLOCK_SIZE, nb_frames, nb_blocks);
struct hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
printf("(II) Identifiant du GPU (CUDA) : %s\n", devProp.name);
printf("(II) Nombre de Multi-Processor : %d\n", devProp.multiProcessorCount);
printf("(II) Taille de memoire globale : %ld\n", devProp.totalGlobalMem);
printf("(II) Taille de sharedMemPerBlock : %ld\n", devProp.sharedMemPerBlock);
/*
int regsPerBlock;
int warpSize;
size_t memPitch;
int maxThreadsPerBlock;
int clockRate;
size_t totalConstMem;
int major;
int minor;
int memoryClockRate;
int memoryBusWidth;
*/
struct hipFuncAttributes attr;
hipFuncGetAttributes(&attr, LDPC_Sched_Stage_1_MS_SIMD);
    int nMP = devProp.multiProcessorCount; // NUMBER OF STREAMING MULTIPROCESSORS
    int nWarp = attr.maxThreadsPerBlock/32; // GROUPS OF 32 THREADS EXECUTABLE IN PARALLEL
    int nThreads = nWarp * 32; // MAXIMUM NUMBER OF THREADS PER SM
int nDOF = nb_frames;
int nBperMP = 65536 / (attr.numRegs); // Nr of blocks per MP
int minB = min(nBperMP*nThreads,1024);
int nBlocks = max(minB/nThreads * nMP, nDOF/nThreads); //Total number of blocks
printf("(II) Nombre de Warp : %d\n", nWarp);
printf("(II) Nombre de Threads : %d\n", nThreads);
printf("(II) LDPC_Sched_Stage_1_MS_SIMD :\n");
printf("(II) - Nombre de regist/thr : %d\n", attr.numRegs);
printf("(II) - Nombre de local/thr : %ld\n", attr.localSizeBytes);
printf("(II) - Nombre de shared/thr : %ld\n", attr.sharedSizeBytes);
printf("(II) Number of attr.reg: %d\n", nDOF);
printf("(II) Nombre de nDOF : %d\n", nDOF);
printf("(II) Nombre de nBperMP : %d\n", nBperMP);
printf("(II) Nombre de nBperMP : %d\n", minB);
printf("(II) Nombre de nBperMP : %d\n", nBlocks);
printf("(II) Best BLOCK_SIZE : %d\n", nThreads * nBperMP);
printf("(II) Best #codewords : %d\n", 0);
if( attr.numRegs <= 32 ){
printf("(II) Best BLOCK_SIZE : %d\n", 128);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else if( attr.numRegs <= 40 ){
printf("(II) Best BLOCK_SIZE : %d\n", 96);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else if( attr.numRegs <= 48 ){
printf("(II) Best BLOCK_SIZE : %d\n", 128);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else if( attr.numRegs < 64 ){
printf("(II) Best BLOCK_SIZE : %d\n", 96);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else{
printf("(II) Best BLOCK_SIZE : ???\n");
// exit( 0 );
}
}
CGPU_Decoder_MS_SIMD::~CGPU_Decoder_MS_SIMD()
{
}
void CGPU_Decoder_MS_SIMD::initialize()
{
}
void CGPU_Decoder_MS_SIMD::decode(float Intrinsic_fix[_N], int Rprime_fix[_N], int nombre_iterations)
{
hipError_t Status;
size_t nb_blocks = nb_frames / BLOCK_SIZE;
if( nb_frames % BLOCK_SIZE != 0 ){
printf("(%ld - %ld) (%ld - %ld)\n", nb_frames, BLOCK_SIZE, nb_frames/BLOCK_SIZE, nb_frames%BLOCK_SIZE);
exit( 0 );
}
//
    // COPY THE INPUT DATA INTO => device_V
//
Status = hipMemcpy/*Async*/(d_MSG_C_2_V, Intrinsic_fix, sz_nodes * sizeof(float), hipMemcpyHostToDevice);
ERROR_CHECK(Status, __FILE__, __LINE__);
{
dim3 grid(1, nb_frames/32);
dim3 threads(32, 32);
hipLaunchKernelGGL(( Interleaver_uint8), dim3(grid), dim3(threads), 0, 0, (int*)d_MSG_C_2_V, (int*)device_V, _N, nb_frames);
}
// LDPC_Sched_Stage_1_MS_SIMD<<<nb_blocks, BLOCK_SIZE>>>((unsigned int*)device_V, (unsigned int*)d_MSG_C_2_V, d_transpose, nombre_iterations);
//
    // DE-INTERLEAVE THE POST-DECODING DATA (device_V => device_R)
//
#define NORMAL 1
#if NORMAL == 1
{
// printf("(II) NB_TRAMES = %d;\n", nb_frames);
// printf("(II) FRAME_LENGTH = %d;\n", _N);
dim3 grid(1, nb_frames/32);
dim3 threads(32, 32);
// printf("(II) Processing grid = %d, %d, %d;\n", grid.x, grid.y, grid.z);
// printf("(II) Thread grid = %d, %d, %d;\n", threads.x, threads.y, threads.z);
hipLaunchKernelGGL(( InvInterleaver_uint8), dim3(grid), dim3(threads), 0, 0, (int*)device_V, (int*)d_MSG_C_2_V, _N, nb_frames);
}
#else
{
unsigned int NB_TRAMES = nb_frames;
unsigned int FRAME_LENGTH = _N;
dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
hipLaunchKernelGGL(( transposeDiagonal_and_hard_decision), dim3(grid), dim3(threads), 0, 0, (unsigned int*)d_MSG_C_2_V, (unsigned int*)device_V, NB_TRAMES, FRAME_LENGTH);
}
#endif
//
//
//
Status = hipMemcpy(Rprime_fix, d_MSG_C_2_V, sz_nodes * sizeof(float), hipMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
}
| 385e77012210a6dec229a13e330ae30515bdc907.cu | /*
* ldcp_decoder.h
* ldpc3
*
* Created by legal on 02/04/11.
* Copyright 2011 ENSEIRB. All rights reserved.
*
*/
/*----------------------------------------------------------------------------*/
#include "CGPU_Decoder_MS_SIMD.h"
#include "../transpose/GPU_Transpose.h"
#include "../transpose/GPU_Transpose_uint8.h"
#include "../tools/debug_fx.h"
#include "cuda_runtime_api.h"
#include "cuda.h"
static const size_t BLOCK_SIZE = 128; // 96 for exp.
CGPU_Decoder_MS_SIMD::CGPU_Decoder_MS_SIMD(size_t _nb_frames, size_t n, size_t k, size_t m):
CGPUDecoder(_nb_frames, n, k, m)
{
size_t nb_blocks = nb_frames / BLOCK_SIZE;
printf("(II) Decoder configuration: BLOCK_SIZE = %ld, nb_frames = %ld, nb_blocks = %ld\n", BLOCK_SIZE, nb_frames, nb_blocks);
struct cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
printf("(II) Identifiant du GPU (CUDA) : %s\n", devProp.name);
printf("(II) Nombre de Multi-Processor : %d\n", devProp.multiProcessorCount);
printf("(II) Taille de memoire globale : %ld\n", devProp.totalGlobalMem);
printf("(II) Taille de sharedMemPerBlock : %ld\n", devProp.sharedMemPerBlock);
/*
int regsPerBlock;
int warpSize;
size_t memPitch;
int maxThreadsPerBlock;
int clockRate;
size_t totalConstMem;
int major;
int minor;
int memoryClockRate;
int memoryBusWidth;
*/
struct cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, LDPC_Sched_Stage_1_MS_SIMD);
    int nMP = devProp.multiProcessorCount; // NUMBER OF STREAMING MULTIPROCESSORS
    int nWarp = attr.maxThreadsPerBlock/32; // GROUPS OF 32 THREADS EXECUTABLE IN PARALLEL
    int nThreads = nWarp * 32; // MAXIMUM NUMBER OF THREADS PER SM
int nDOF = nb_frames;
int nBperMP = 65536 / (attr.numRegs); // Nr of blocks per MP
int minB = min(nBperMP*nThreads,1024);
int nBlocks = max(minB/nThreads * nMP, nDOF/nThreads); //Total number of blocks
printf("(II) Nombre de Warp : %d\n", nWarp);
printf("(II) Nombre de Threads : %d\n", nThreads);
printf("(II) LDPC_Sched_Stage_1_MS_SIMD :\n");
printf("(II) - Nombre de regist/thr : %d\n", attr.numRegs);
printf("(II) - Nombre de local/thr : %ld\n", attr.localSizeBytes);
printf("(II) - Nombre de shared/thr : %ld\n", attr.sharedSizeBytes);
printf("(II) Number of attr.reg: %d\n", nDOF);
printf("(II) Nombre de nDOF : %d\n", nDOF);
printf("(II) Nombre de nBperMP : %d\n", nBperMP);
printf("(II) Nombre de nBperMP : %d\n", minB);
printf("(II) Nombre de nBperMP : %d\n", nBlocks);
printf("(II) Best BLOCK_SIZE : %d\n", nThreads * nBperMP);
printf("(II) Best #codewords : %d\n", 0);
if( attr.numRegs <= 32 ){
printf("(II) Best BLOCK_SIZE : %d\n", 128);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else if( attr.numRegs <= 40 ){
printf("(II) Best BLOCK_SIZE : %d\n", 96);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else if( attr.numRegs <= 48 ){
printf("(II) Best BLOCK_SIZE : %d\n", 128);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else if( attr.numRegs < 64 ){
printf("(II) Best BLOCK_SIZE : %d\n", 96);
printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256);
}else{
printf("(II) Best BLOCK_SIZE : ???\n");
// exit( 0 );
}
}
CGPU_Decoder_MS_SIMD::~CGPU_Decoder_MS_SIMD()
{
}
void CGPU_Decoder_MS_SIMD::initialize()
{
}
void CGPU_Decoder_MS_SIMD::decode(float Intrinsic_fix[_N], int Rprime_fix[_N], int nombre_iterations)
{
cudaError_t Status;
size_t nb_blocks = nb_frames / BLOCK_SIZE;
if( nb_frames % BLOCK_SIZE != 0 ){
printf("(%ld - %ld) (%ld - %ld)\n", nb_frames, BLOCK_SIZE, nb_frames/BLOCK_SIZE, nb_frames%BLOCK_SIZE);
exit( 0 );
}
//
    // COPY THE INPUT DATA INTO => device_V
//
Status = cudaMemcpy/*Async*/(d_MSG_C_2_V, Intrinsic_fix, sz_nodes * sizeof(float), cudaMemcpyHostToDevice);
ERROR_CHECK(Status, __FILE__, __LINE__);
{
dim3 grid(1, nb_frames/32);
dim3 threads(32, 32);
Interleaver_uint8<<<grid, threads>>>((int*)d_MSG_C_2_V, (int*)device_V, _N, nb_frames);
}
// LDPC_Sched_Stage_1_MS_SIMD<<<nb_blocks, BLOCK_SIZE>>>((unsigned int*)device_V, (unsigned int*)d_MSG_C_2_V, d_transpose, nombre_iterations);
//
    // DE-INTERLEAVE THE POST-DECODING DATA (device_V => device_R)
//
#define NORMAL 1
#if NORMAL == 1
{
// printf("(II) NB_TRAMES = %d;\n", nb_frames);
// printf("(II) FRAME_LENGTH = %d;\n", _N);
dim3 grid(1, nb_frames/32);
dim3 threads(32, 32);
// printf("(II) Processing grid = %d, %d, %d;\n", grid.x, grid.y, grid.z);
// printf("(II) Thread grid = %d, %d, %d;\n", threads.x, threads.y, threads.z);
InvInterleaver_uint8<<<grid, threads>>>((int*)device_V, (int*)d_MSG_C_2_V, _N, nb_frames);
}
#else
{
unsigned int NB_TRAMES = nb_frames;
unsigned int FRAME_LENGTH = _N;
dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
transposeDiagonal_and_hard_decision<<<grid, threads>>>((unsigned int*)d_MSG_C_2_V, (unsigned int*)device_V, NB_TRAMES, FRAME_LENGTH);
}
#endif
//
//
//
Status = cudaMemcpy(Rprime_fix, d_MSG_C_2_V, sz_nodes * sizeof(float), cudaMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
}
|
74a76c7dff964be4690ab6f3c2f2bd509e4d873b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detection/box_clip_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int ImInfoSize = 3;
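// One thread block per LoD segment (image): recover the original width/height
// by dividing the scaled sizes in im_info ([h, w, scale]) by the scale factor,
// then clamp x coordinates (even indices) to [0, im_w - 1] and y coordinates
// (odd indices) to [0, im_h - 1].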
template <typename T, int BlockSize>
static __global__ void GPUBoxClip(const T *input, const size_t *lod,
const size_t width, const T *im_info,
T *output) {
T im_w = round(im_info[blockIdx.x * ImInfoSize + 1] /
im_info[blockIdx.x * ImInfoSize + 2]);
T im_h = round(im_info[blockIdx.x * ImInfoSize] /
im_info[blockIdx.x * ImInfoSize + 2]);
for (int i = threadIdx.x; i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * width;
i += BlockSize) {
int idx = lod[blockIdx.x] * width + i;
T im_size = (idx % 2 == 0) ? im_w : im_h;
output[idx] = max(min(input[idx], im_size - 1), T(0.));
}
}
template <typename DeviceContext, typename T>
class GPUBoxClipKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *input = context.Input<LoDTensor>("Input");
auto *im_info = context.Input<Tensor>("ImInfo");
auto *output = context.Output<LoDTensor>("Output");
const int64_t num = input->dims()[0];
const int64_t bbox_width = input->numel() / num;
auto lod = input->lod();
framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
auto &dev_ctx = context.template device_context<DeviceContext>();
auto stream = dev_ctx.stream();
const size_t batch_size = lod.back().size() - 1;
T *output_data = output->mutable_data<T>(dev_ctx.GetPlace());
hipLaunchKernelGGL(( GPUBoxClip<T, 512>), dim3(batch_size), dim3(512), 0, stream,
input->data<T>(), abs_offset_lod[0].CUDAMutableData(dev_ctx.GetPlace()),
bbox_width, im_info->data<T>(), output_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
box_clip, ops::GPUBoxClipKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUBoxClipKernel<paddle::platform::CUDADeviceContext, double>);
| 74a76c7dff964be4690ab6f3c2f2bd509e4d873b.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detection/box_clip_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int ImInfoSize = 3;
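// One thread block per LoD segment (image): recover the original width/height
// by dividing the scaled sizes in im_info ([h, w, scale]) by the scale factor,
// then clamp x coordinates (even indices) to [0, im_w - 1] and y coordinates
// (odd indices) to [0, im_h - 1].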
template <typename T, int BlockSize>
static __global__ void GPUBoxClip(const T *input, const size_t *lod,
const size_t width, const T *im_info,
T *output) {
T im_w = round(im_info[blockIdx.x * ImInfoSize + 1] /
im_info[blockIdx.x * ImInfoSize + 2]);
T im_h = round(im_info[blockIdx.x * ImInfoSize] /
im_info[blockIdx.x * ImInfoSize + 2]);
for (int i = threadIdx.x; i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * width;
i += BlockSize) {
int idx = lod[blockIdx.x] * width + i;
T im_size = (idx % 2 == 0) ? im_w : im_h;
output[idx] = max(min(input[idx], im_size - 1), T(0.));
}
}
template <typename DeviceContext, typename T>
class GPUBoxClipKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *input = context.Input<LoDTensor>("Input");
auto *im_info = context.Input<Tensor>("ImInfo");
auto *output = context.Output<LoDTensor>("Output");
const int64_t num = input->dims()[0];
const int64_t bbox_width = input->numel() / num;
auto lod = input->lod();
framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
auto &dev_ctx = context.template device_context<DeviceContext>();
auto stream = dev_ctx.stream();
const size_t batch_size = lod.back().size() - 1;
T *output_data = output->mutable_data<T>(dev_ctx.GetPlace());
GPUBoxClip<T, 512><<<batch_size, 512, 0, stream>>>(
input->data<T>(), abs_offset_lod[0].CUDAMutableData(dev_ctx.GetPlace()),
bbox_width, im_info->data<T>(), output_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
box_clip, ops::GPUBoxClipKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUBoxClipKernel<paddle::platform::CUDADeviceContext, double>);
|
c0aba67897ff7553415ece8327d0e91a242242b2.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);
}
REGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);
}} // namespace at::native
| c0aba67897ff7553415ece8327d0e91a242242b2.cu | #include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);
}
REGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);
}} // namespace at::native
|
0b164cbbf415bd3400981fd7f508071dd03e9512.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
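/* Build a per-warp pointer chain: thread 0 fills ptr_array so that entry i of
 * each warp's region points to entry (i + 16) mod elements_per_warp, then
 * copies the pointers into my_array as 64-bit integers for the traversal
 * kernel to chase. */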
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
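/* Pointer-chasing latency kernel: lanes with warp_thread_id < divergence
 * repeatedly follow the chain through their warp's region; the heavily
 * unrolled body keeps a long dependent-load sequence in flight, and the final
 * store to duration[] prevents the chain from being optimized away. */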
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
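/* NOTE: the chain built above uses host addresses and only provides initial
   data; init_memory rebuilds the per-warp pointer-chasing ring with device
   addresses before the timed kernel runs. */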
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 0b164cbbf415bd3400981fd7f508071dd03e9512.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
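/* NOTE: the chain built above uses host addresses and only provides initial
   data; init_memory rebuilds the per-warp pointer-chasing ring with device
   addresses before the timed kernel runs. */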
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
dc515bdef316c6f6e9566fd7935185f78a65eebf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Example showing the use of CUFFT for fast 1D-convolution using FFT.
* This sample is the same as simpleCUFFT, except that it uses a callback
* function to perform the pointwise multiply and scale, on input to the
* inverse transform.
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
// This is the callback routine prototype
static __device__ hipfftComplex ComplexPointwiseMulAndScale(void * a, size_t index, void * cb_info, void *sharedmem);
typedef struct _cb_params{
Complex *filter;
float scale;
} cb_params;
// This is the callback routine. It does complex pointwise multiplication with scaling.
static __device__ hipfftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem)
{
cb_params * my_params = (cb_params *)cb_info;
return (hipfftComplex)ComplexScale(ComplexMul(((Complex *)a)[index],
(my_params->filter)[index]),
my_params->scale);
}
// Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT
__device__ cufftCallbackLoadC myOwnCallbackPtr = ComplexPointwiseMulAndScale;
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
struct hipDeviceProp_t properties;
int device;
checkCudaErrors(hipGetDevice(&device));
checkCudaErrors(hipGetDeviceProperties(&properties, device));
if( !(properties.major >= 2) ) {
printf("simpleCUFFT_callback requires CUDA architecture SM2.0 or higher\n");
exit(EXIT_WAIVED);
}
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUFFT callbacks
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
printf("[simpleCUFFT_callback] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Initalize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
{
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
// Initalize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
{
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex *h_padded_signal;
Complex *h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex *d_signal;
checkCudaErrors(hipMalloc((void **)&d_signal, mem_size));
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_signal, h_padded_signal, mem_size,
hipMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex *d_filter_kernel;
checkCudaErrors(hipMalloc((void **)&d_filter_kernel, mem_size));
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
hipMemcpyHostToDevice));
// Create one CUFFT plan for the forward transforms, and one for the reverse transform
// with load callback.
hipfftHandle plan, cb_plan;
size_t work_size;
checkCudaErrors(hipfftCreate(&plan));
checkCudaErrors(hipfftCreate(&cb_plan));
checkCudaErrors(hipfftMakePlan1d(plan, new_size, HIPFFT_C2C, 1, &work_size));
checkCudaErrors(hipfftMakePlan1d(cb_plan, new_size, HIPFFT_C2C, 1, &work_size));
// Define a structure used to pass in the device address of the filter kernel, and
// the scale factor
cb_params h_params;
h_params.filter = d_filter_kernel;
h_params.scale = 1.0f / new_size;
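// The inverse transform is unnormalized, so the 1/new_size factor is folded
// into the load callback's pointwise multiply.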
// Allocate device memory for parameters
cb_params *d_params;
checkCudaErrors(hipMalloc((void **)&d_params, sizeof(cb_params)));
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_params, &h_params, sizeof(cb_params),
hipMemcpyHostToDevice));
// The host needs to get a copy of the device pointer to the callback
cufftCallbackLoadC hostCopyOfCallbackPtr;
checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr,
myOwnCallbackPtr,
sizeof(hostCopyOfCallbackPtr)));
// Now associate the load callback with the plan.
hipfftResult status = cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params);
if (status == HIPFFT_LICENSE_ERROR)
{
printf("This sample requires a valid license file.\n");
printf("The file was either not found, out of date, or otherwise invalid.\n");
exit(EXIT_WAIVED);
} else {
checkCudaErrors(cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params));
}
// Transform signal and kernel
printf("Transforming signal hipfftExecC2C\n");
checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD));
checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD));
// Transform signal back, using the callback to do the pointwise multiply on the way in.
printf("Transforming signal back hipfftExecC2C\n");
checkCudaErrors(hipfftExecC2C(cb_plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD));
// Copy device memory to host
Complex *h_convolved_signal = h_padded_signal;
checkCudaErrors(hipMemcpy(h_convolved_signal, d_signal, mem_size,
hipMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
//Destroy CUFFT context
checkCudaErrors(hipfftDestroy(plan));
checkCudaErrors(hipfftDestroy(cb_plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
checkCudaErrors(hipFree(d_signal));
checkCudaErrors(hipFree(d_filter_kernel));
checkCudaErrors(hipFree(d_params));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Pad data
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
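// Padding the signal by maxRadius and wrapping the filter around the end of the
// buffer lets the FFT's circular convolution reproduce, on the first SIGNAL_SIZE
// samples, the linear convolution computed by Convolve() on the host.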
// Pad signal
Complex *new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex *signal, int signal_size,
const Complex *filter_kernel, int filter_kernel_size,
Complex *filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i)
{
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j)
{
int k = i + j;
if (k >= 0 && k < signal_size)
{
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
| dc515bdef316c6f6e9566fd7935185f78a65eebf.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Example showing the use of CUFFT for fast 1D-convolution using FFT.
* This sample is the same as simpleCUFFT, except that it uses a callback
* function to perform the pointwise multiply and scale, on input to the
* inverse transform.
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
// This is the callback routine prototype
static __device__ cufftComplex ComplexPointwiseMulAndScale(void * a, size_t index, void * cb_info, void *sharedmem);
typedef struct _cb_params{
Complex *filter;
float scale;
} cb_params;
// This is the callback routine. It does complex pointwise multiplication with scaling.
static __device__ cufftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem)
{
cb_params * my_params = (cb_params *)cb_info;
return (cufftComplex)ComplexScale(ComplexMul(((Complex *)a)[index],
(my_params->filter)[index]),
my_params->scale);
}
// Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT
__device__ cufftCallbackLoadC myOwnCallbackPtr = ComplexPointwiseMulAndScale;
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
struct cudaDeviceProp properties;
int device;
checkCudaErrors(cudaGetDevice(&device));
checkCudaErrors(cudaGetDeviceProperties(&properties, device));
if( !(properties.major >= 2) ) {
printf("simpleCUFFT_callback requires CUDA architecture SM2.0 or higher\n");
exit(EXIT_WAIVED);
}
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUFFT callbacks
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
printf("[simpleCUFFT_callback] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Initalize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
{
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
// Initalize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
{
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex *h_padded_signal;
Complex *h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex *d_signal;
checkCudaErrors(cudaMalloc((void **)&d_signal, mem_size));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_signal, h_padded_signal, mem_size,
cudaMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex *d_filter_kernel;
checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
cudaMemcpyHostToDevice));
// Create one CUFFT plan for the forward transforms, and one for the reverse transform
// with load callback.
cufftHandle plan, cb_plan;
size_t work_size;
checkCudaErrors(cufftCreate(&plan));
checkCudaErrors(cufftCreate(&cb_plan));
checkCudaErrors(cufftMakePlan1d(plan, new_size, CUFFT_C2C, 1, &work_size));
checkCudaErrors(cufftMakePlan1d(cb_plan, new_size, CUFFT_C2C, 1, &work_size));
// Define a structure used to pass in the device address of the filter kernel, and
// the scale factor
cb_params h_params;
h_params.filter = d_filter_kernel;
h_params.scale = 1.0f / new_size;
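// The inverse transform is unnormalized, so the 1/new_size factor is folded
// into the load callback's pointwise multiply.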
// Allocate device memory for parameters
cb_params *d_params;
checkCudaErrors(cudaMalloc((void **)&d_params, sizeof(cb_params)));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_params, &h_params, sizeof(cb_params),
cudaMemcpyHostToDevice));
// The host needs to get a copy of the device pointer to the callback
cufftCallbackLoadC hostCopyOfCallbackPtr;
checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr,
myOwnCallbackPtr,
sizeof(hostCopyOfCallbackPtr)));
// Now associate the load callback with the plan.
cufftResult status = cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params);
if (status == CUFFT_LICENSE_ERROR)
{
printf("This sample requires a valid license file.\n");
printf("The file was either not found, out of date, or otherwise invalid.\n");
exit(EXIT_WAIVED);
} else {
checkCudaErrors(cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params));
}
// Transform signal and kernel
printf("Transforming signal cufftExecC2C\n");
checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD));
checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD));
// Transform signal back, using the callback to do the pointwise multiply on the way in.
printf("Transforming signal back cufftExecC2C\n");
checkCudaErrors(cufftExecC2C(cb_plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE));
// Copy device memory to host
Complex *h_convolved_signal = h_padded_signal;
checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size,
cudaMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
//Destroy CUFFT context
checkCudaErrors(cufftDestroy(plan));
checkCudaErrors(cufftDestroy(cb_plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
checkCudaErrors(cudaFree(d_signal));
checkCudaErrors(cudaFree(d_filter_kernel));
checkCudaErrors(cudaFree(d_params));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Pad data
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
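// Padding the signal by maxRadius and wrapping the filter around the end of the
// buffer lets the FFT's circular convolution reproduce, on the first SIGNAL_SIZE
// samples, the linear convolution computed by Convolve() on the host.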
// Pad signal
Complex *new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex *signal, int signal_size,
const Complex *filter_kernel, int filter_kernel_size,
Complex *filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i)
{
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j)
{
int k = i + j;
if (k >= 0 && k < signal_size)
{
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
|
64f3e4b4d8b931e4019733c7156001f7b4574cf5.hip | // !!! This is a file automatically generated by hipify!!!
/**
* This ended up not being as efficient as CPU threads.
*
* See the "Results 1" results for the benchmark data.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <hip/hip_runtime.h>
#include "../matrix.c"
#include "../matrix.h"
void fail(const char *message)
{
printf("%s", message);
exit(EXIT_FAILURE);
}
__global__ void doMultiply(int *d_result, int *d_a, int *d_b,
int m, int n, int p, int nThreads)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int nElements = m * p;
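// Grid-stride loop over the m*p output elements: each thread handles idx,
// idx + nThreads, ... Because A is row-major and B is column-major, both the
// row of A and the "column" of B are contiguous in memory.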
while (idx < nElements) {
int r = idx / p; // Integer division on purpose
int c = idx % p;
int *v1 = d_a + (r * n);
int *v2 = d_b + (c * n);
int dotProd = 0;
for (int i = 0; i < n; i++) {
dotProd += v1[i] * v2[i];
}
d_result[r * p + c] = dotProd;
idx += nThreads;
}
}
/**
* Assumes a is row-major and b is column-major.
*
* Result is always row-major.
*/
Matrix *multiply(Matrix *a, Matrix *b)
{
Matrix *result = generateMatrix(a->nRows, b->nCols, true);
// Move A to device
int *d_a = NULL;
if (hipMalloc((void **) &d_a, a->nRows * a->nCols * sizeof(int)) != hipSuccess)
fail("Failed to allocate space for A");
if (hipMemcpy(d_a, a->values, a->nRows * a->nCols * sizeof(int), hipMemcpyHostToDevice) != hipSuccess)
fail("Failed to copy A over");
// Move B to device
int *d_b = NULL;
if (hipMalloc((void **) &d_b, b->nRows * b->nCols * sizeof(int)) != hipSuccess)
fail("Failed to allocate space for B");
if (hipMemcpy(d_b, b->values, b->nRows * b->nCols * sizeof(int), hipMemcpyHostToDevice) != hipSuccess)
fail("Failed to copy B over");
// Allocate space for AB
int *d_result = NULL;
if (hipMalloc((void **)&d_result, a->nRows * b->nCols * sizeof(int)) != hipSuccess)
fail("Failed to allocate space for the result matrix");
// Calculate
// int threadsPerBlock = 192;
// int nBlocks = 13;
int threadsPerBlock = 128;
int nBlocks = 32;
int nThreads = threadsPerBlock * nBlocks;
int nElements = a->nRows * b->nCols;
hipLaunchKernelGGL(( doMultiply), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, d_result, d_a, d_b, a->nRows, a->nCols, b->nCols, nThreads);
if (hipGetLastError() != hipSuccess)
fail("Failure in CUDA kernel execution");
if (hipMemcpy(result->values, d_result, nElements * sizeof(int), hipMemcpyDeviceToHost) != hipSuccess)
fail("Failed to copy result matrix to host");
return result;
}
int main(int argc, char *argv[])
{
// Ensure enough arguments exist
if (argc < 5)
fail("Required arguments: nRows1 nCols1 nRows2 nCols2\n");
int nRows1,
nCols1,
nRows2,
nCols2;
// It's okay that atoi returns 0 on invalid
// because 0 is an invalid matrix dimension
if ((nRows1 = atoi(argv[1])) == 0)
fail("Invalid matrix dimension.\n");
if ((nCols1 = atoi(argv[2])) == 0)
fail("Invalid matrix dimension.\n");
if ((nRows2 = atoi(argv[3])) == 0)
fail("Invalid matrix dimension.\n");
if ((nCols2 = atoi(argv[4])) == 0)
fail("Invalid matrix dimension.\n");
// Negative matrix dimensions are also bad
if (nRows1 < 0 || nCols1 < 0 || nRows2 < 0 || nCols2 < 0)
fail("Invalid matrix dimension.\n");
// Make sure the matrix multiplication is valid
if (nCols1 != nRows2)
fail("Matrices cannot be multiplied (nCols1 needs to equal nRows2)\n");
// Echo matrix dimensions to the user
// printf("%d x %d\n", nRows1, nCols1);
// printf("%d x %d\n", nRows2, nCols2);
// printf("\n");
Matrix *a = generateMatrix(nRows1, nCols1, true);
fillMatrixStepwise(a);
Matrix *b = generateMatrix(nRows2, nCols2, false);
fillMatrixStepwise(b);
Matrix *ab = multiply(a, b);
// printMatrix(a);
// printf("\n");
// printMatrix(b);
// printf("\n");
// printMatrix(ab);
// Clean up
destroyMatrix(a);
destroyMatrix(b);
destroyMatrix(ab);
hipDeviceReset();
return EXIT_SUCCESS;
}
| 64f3e4b4d8b931e4019733c7156001f7b4574cf5.cu | /**
* This ended up not being as efficient as CPU threads.
*
* See the "Results 1" results for the benchmark data.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <cuda_runtime.h>
#include "../matrix.c"
#include "../matrix.h"
void fail(const char *message)
{
printf("%s", message);
exit(EXIT_FAILURE);
}
__global__ void doMultiply(int *d_result, int *d_a, int *d_b,
int m, int n, int p, int nThreads)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int nElements = m * p;
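// Grid-stride loop over the m*p output elements: each thread handles idx,
// idx + nThreads, ... Because A is row-major and B is column-major, both the
// row of A and the "column" of B are contiguous in memory.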
while (idx < nElements) {
int r = idx / p; // Integer division on purpose
int c = idx % p;
int *v1 = d_a + (r * n);
int *v2 = d_b + (c * n);
int dotProd = 0;
for (int i = 0; i < n; i++) {
dotProd += v1[i] * v2[i];
}
d_result[r * p + c] = dotProd;
idx += nThreads;
}
}
/**
* Assumes a is row-major and b is column-major.
*
* Result is always row-major.
*/
Matrix *multiply(Matrix *a, Matrix *b)
{
Matrix *result = generateMatrix(a->nRows, b->nCols, true);
// Move A to device
int *d_a = NULL;
if (cudaMalloc((void **) &d_a, a->nRows * a->nCols * sizeof(int)) != cudaSuccess)
fail("Failed to allocate space for A");
if (cudaMemcpy(d_a, a->values, a->nRows * a->nCols * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess)
fail("Failed to copy A over");
// Move B to device
int *d_b = NULL;
if (cudaMalloc((void **) &d_b, b->nRows * b->nCols * sizeof(int)) != cudaSuccess)
fail("Failed to allocate space for B");
if (cudaMemcpy(d_b, b->values, b->nRows * b->nCols * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess)
fail("Failed to copy B over");
// Allocate space for AB
int *d_result = NULL;
if (cudaMalloc((void **)&d_result, a->nRows * b->nCols * sizeof(int)) != cudaSuccess)
fail("Failed to allocate space for the result matrix");
// Calculate
// int threadsPerBlock = 192;
// int nBlocks = 13;
int threadsPerBlock = 128;
int nBlocks = 32;
int nThreads = threadsPerBlock * nBlocks;
int nElements = a->nRows * b->nCols;
doMultiply<<<nBlocks, threadsPerBlock>>>(d_result, d_a, d_b, a->nRows, a->nCols, b->nCols, nThreads);
if (cudaGetLastError() != cudaSuccess)
fail("Failure in CUDA kernel execution");
if (cudaMemcpy(result->values, d_result, nElements * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
fail("Failed to copy result matrix to host");
return result;
}
int main(int argc, char *argv[])
{
// Ensure enough arguments exist
if (argc < 5)
fail("Required arguments: nRows1 nCols1 nRows2 nCols2\n");
int nRows1,
nCols1,
nRows2,
nCols2;
// It's okay that atoi returns 0 on invalid
// because 0 is an invalid matrix dimension
if ((nRows1 = atoi(argv[1])) == 0)
fail("Invalid matrix dimension.\n");
if ((nCols1 = atoi(argv[2])) == 0)
fail("Invalid matrix dimension.\n");
if ((nRows2 = atoi(argv[3])) == 0)
fail("Invalid matrix dimension.\n");
if ((nCols2 = atoi(argv[4])) == 0)
fail("Invalid matrix dimension.\n");
// Negative matrix dimensions are also bad
if (nRows1 < 0 || nCols1 < 0 || nRows2 < 0 || nCols2 < 0)
fail("Invalid matrix dimension.\n");
// Make sure the matrix multiplication is valid
if (nCols1 != nRows2)
fail("Matrices cannot be multiplied (nCols1 needs to equal nRows2)\n");
// Echo matrix dimensions to the user
// printf("%d x %d\n", nRows1, nCols1);
// printf("%d x %d\n", nRows2, nCols2);
// printf("\n");
Matrix *a = generateMatrix(nRows1, nCols1, true);
fillMatrixStepwise(a);
Matrix *b = generateMatrix(nRows2, nCols2, false);
fillMatrixStepwise(b);
Matrix *ab = multiply(a, b);
// printMatrix(a);
// printf("\n");
// printMatrix(b);
// printf("\n");
// printMatrix(ab);
// Clean up
destroyMatrix(a);
destroyMatrix(b);
destroyMatrix(ab);
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
fb7c00709d34e055d334556e07a438ec66ab70c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "layer.hpp"
__global__ void ForwardReLU(float* Z, int nRowsZ, int nColsZ, float* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nRowsZ * nColsZ)
{
if (Z[index] >= 0)
A[index] = Z[index];
else
A[index] = 0;
}
}
__global__ void BackwardReLU(float* Z, float* dA, int nRowsdZ, int nColsdZ,
float *dZ)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nRowsdZ * nColsdZ)
{
if (Z[index] >= 0)
dZ[index] = dA[index];
else
dZ[index] = 0;
}
}
class ReLU : public Layer
{
public:
ReLU()
{
dimBlock = 64;
}
~ReLU()
{
/* Nothing to do here */
}
Matrix& Forward(Matrix& Z)
{
this->Z = Z;
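// Keep the pre-activation input; Backward reuses its sign as the mask for dA.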
A.AllocateMemory(Z.nRows, Z.nCols);
int dimGrid;
if ((Z.nRows * Z.nCols) % dimBlock == 0)
dimGrid = (Z.nRows * Z.nCols) / dimBlock;
else
dimGrid = (Z.nRows * Z.nCols) / dimBlock + 1;
hipLaunchKernelGGL(( ForwardReLU), dim3(dimGrid), dim3(dimBlock), 0, 0, Z.deviceMat.get(), Z.nRows, Z.nCols,
A.deviceMat.get());
CheckErrors(hipGetLastError(),
"ReLU:: Kernel invocation: ForwardReLU");
// Comment the below line if it's not needed on the host.
// A.CopyDeviceToHost();
return A;
}
Matrix& Backward(Matrix& dA, float lr)
{
dZ.AllocateMemory(Z.nRows, Z.nCols);
int dimGrid;
if ((dZ.nRows * dZ.nCols) % dimBlock == 0)
dimGrid = (dZ.nRows * dZ.nCols) / dimBlock;
else
dimGrid = (dZ.nRows * dZ.nCols) / dimBlock + 1;
hipLaunchKernelGGL(( BackwardReLU), dim3(dimGrid), dim3(dimBlock), 0, 0, Z.deviceMat.get(), dA.deviceMat.get(),
dZ.nRows, dZ.nCols, dZ.deviceMat.get());
CheckErrors(hipGetLastError(),
"ReLU:: Kernel invocation: BackwardReLU");
// Comment the below line if it's not needed on the host.
// dZ.CopyDeviceToHost();
return dZ;
}
/*
* CPU implementations of functions for time study.
*/
Matrix& ForwardCPU(Matrix& Z)
{
this->Z = Z;
A.AllocateMemory(Z.nRows, Z.nCols);
for (int i = 0; i < A.nRows; i++)
{
for (int j = 0; j < A.nCols; j++)
{
if (Z(i, j) >= 0)
A(i, j) = Z(i, j);
else
A(i, j) = 0;
}
}
// A.CopyHostToDevice();
return A;
}
Matrix& BackwardCPU(Matrix& dA, float lr)
{
dZ.AllocateMemory(Z.nRows, Z.nCols);
for (int i = 0; i < A.nRows; i++)
{
for (int j = 0; j < A.nCols; j++)
{
if (Z(i, j) >= 0)
dZ(i, j) = dA(i, j);
else
dZ(i, j) = 0;
}
}
// dZ.CopyHostToDevice();
return dZ;
}
private:
// Input and its derivative w.r.t. the loss.
Matrix Z;
Matrix dZ;
// Output.
Matrix A;
int dimBlock;
};
| fb7c00709d34e055d334556e07a438ec66ab70c8.cu | #pragma once
#include "layer.hpp"
__global__ void ForwardReLU(float* Z, int nRowsZ, int nColsZ, float* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nRowsZ * nColsZ)
{
if (Z[index] >= 0)
A[index] = Z[index];
else
A[index] = 0;
}
}
__global__ void BackwardReLU(float* Z, float* dA, int nRowsdZ, int nColsdZ,
float *dZ)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nRowsdZ * nColsdZ)
{
if (Z[index] >= 0)
dZ[index] = dA[index];
else
dZ[index] = 0;
}
}
class ReLU : public Layer
{
public:
ReLU()
{
dimBlock = 64;
}
~ReLU()
{
/* Nothing to do here */
}
Matrix& Forward(Matrix& Z)
{
this->Z = Z;
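// Keep the pre-activation input; Backward reuses its sign as the mask for dA.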
A.AllocateMemory(Z.nRows, Z.nCols);
int dimGrid;
if ((Z.nRows * Z.nCols) % dimBlock == 0)
dimGrid = (Z.nRows * Z.nCols) / dimBlock;
else
dimGrid = (Z.nRows * Z.nCols) / dimBlock + 1;
ForwardReLU<<<dimGrid, dimBlock>>>(Z.deviceMat.get(), Z.nRows, Z.nCols,
A.deviceMat.get());
CheckErrors(cudaGetLastError(),
"ReLU:: Kernel invocation: ForwardReLU");
// Comment the below line if it's not needed on the host.
// A.CopyDeviceToHost();
return A;
}
Matrix& Backward(Matrix& dA, float lr)
{
dZ.AllocateMemory(Z.nRows, Z.nCols);
int dimGrid;
if ((dZ.nRows * dZ.nCols) % dimBlock == 0)
dimGrid = (dZ.nRows * dZ.nCols) / dimBlock;
else
dimGrid = (dZ.nRows * dZ.nCols) / dimBlock + 1;
BackwardReLU<<<dimGrid, dimBlock>>>(Z.deviceMat.get(), dA.deviceMat.get(),
dZ.nRows, dZ.nCols, dZ.deviceMat.get());
CheckErrors(cudaGetLastError(),
"ReLU:: Kernel invocation: BackwardReLU");
// Comment the below line if it's not needed on the host.
// dZ.CopyDeviceToHost();
return dZ;
}
/*
* CPU implementations of functions for time study.
*/
Matrix& ForwardCPU(Matrix& Z)
{
this->Z = Z;
A.AllocateMemory(Z.nRows, Z.nCols);
for (int i = 0; i < A.nRows; i++)
{
for (int j = 0; j < A.nCols; j++)
{
if (Z(i, j) >= 0)
A(i, j) = Z(i, j);
else
A(i, j) = 0;
}
}
// A.CopyHostToDevice();
return A;
}
Matrix& BackwardCPU(Matrix& dA, float lr)
{
dZ.AllocateMemory(Z.nRows, Z.nCols);
for (int i = 0; i < A.nRows; i++)
{
for (int j = 0; j < A.nCols; j++)
{
if (Z(i, j) >= 0)
dZ(i, j) = dA(i, j);
else
dZ(i, j) = 0;
}
}
// dZ.CopyHostToDevice();
return dZ;
}
private:
// Input and its derivative w.r.t. the loss.
Matrix Z;
Matrix dZ;
// Output.
Matrix A;
int dimBlock;
};
|
a9e0586c31cfe4dd32083670e2b4820a6e08a27b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<cuda_runtime.h>
using std::cout;
using std::endl;
__global__ void bgr_to_gray_kernel( unsigned char* input, unsigned char* output, unsigned char* outputA, unsigned char* outputB, int width, int height, int widthstep)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if((xIndex>1) && (yIndex>1) && (xIndex<width-1) && (yIndex<height-1))
{
const int tid = yIndex * widthstep + xIndex;
const int tidA = (yIndex) * widthstep + (xIndex-1);
const int tidB = (yIndex) * widthstep + (xIndex+1);
const int tidC = (yIndex-1) * widthstep + (xIndex);
const int tidD = (yIndex-1) * widthstep + (xIndex-1);
/*const int tidE = (yIndex-1) * widthstep + (xIndex+1);
const int tidF = (yIndex+1) * widthstep + (xIndex);
const int tidG = (yIndex+1) * widthstep + (xIndex-1);
const int tidH = (yIndex+1) * widthstep + (xIndex+1);*/
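// Three dependent passes: (1) binarize the input into outputA, (2) erode into
// outputB (a pixel survives only if its left/right/up/up-left neighbours are
// all set), (3) clear output pixels that have any set neighbour in outputB.
// __syncthreads() only orders threads within a block, so neighbours owned by
// other blocks may still hold stale values when passes 2 and 3 read them.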
if(input[tid]>100)
outputA[tid]=255;
else
outputA[tid]=0;
__syncthreads();
if((outputA[tidA]>=100)&&(outputA[tidB]>=100)&&(outputA[tidC]>=100)&&(outputA[tidD]>=100))
outputB[tid]=255;
else
outputB[tid]=0;
__syncthreads();
if((outputB[tidA]>=100)||(outputB[tidB]>=100)||(outputB[tidC]>=100)||(outputB[tidD]>=100))
output[tid]=0;
else
output[tid]=255;
}
}
void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
const int Bytes = input.step * input.rows;
unsigned char *d_input, *d_output, *d_outputA, *d_outputB;
hipMalloc<unsigned char>(&d_input,Bytes);
hipMalloc<unsigned char>(&d_output,Bytes);
hipMalloc<unsigned char>(&d_outputA,Bytes);
hipMalloc<unsigned char>(&d_outputB,Bytes);
hipMemcpy(d_input,input.ptr(),Bytes,hipMemcpyHostToDevice);
const dim3 block(16,16);
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
hipLaunchKernelGGL(( bgr_to_gray_kernel), dim3(grid),dim3(block), 0, 0, d_input,d_output,d_outputA,d_outputB,input.cols,input.rows,input.step);
hipDeviceSynchronize();
hipMemcpy(output.ptr(),d_output,Bytes,hipMemcpyDeviceToHost);
hipFree(d_input);
hipFree(d_output);
hipFree(d_outputA);
hipFree(d_outputB);
}
int main()
{
std::string imagePath = "image.jpg";
cv::Mat Input = cv::imread(imagePath,CV_LOAD_IMAGE_GRAYSCALE);
cv::Mat Output(Input.rows,Input.cols,CV_8U);
if(Input.empty())
{
std::cout<<"Image Not Found!"<<std::endl;
std::cin.get();
return -1;
}
convert_to_gray(Input,Output);
cv::imshow("Input",Input);
cv::imshow("Output",Output);
cv::waitKey();
return 0;
}
| a9e0586c31cfe4dd32083670e2b4820a6e08a27b.cu | #include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<cuda_runtime.h>
using std::cout;
using std::endl;
__global__ void bgr_to_gray_kernel( unsigned char* input, unsigned char* output, unsigned char* outputA, unsigned char* outputB, int width, int height, int widthstep)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if((xIndex>1) && (yIndex>1) && (xIndex<width-1) && (yIndex<height-1))
{
const int tid = yIndex * widthstep + xIndex;
const int tidA = (yIndex) * widthstep + (xIndex-1);
const int tidB = (yIndex) * widthstep + (xIndex+1);
const int tidC = (yIndex-1) * widthstep + (xIndex);
const int tidD = (yIndex-1) * widthstep + (xIndex-1);
/*const int tidE = (yIndex-1) * widthstep + (xIndex+1);
const int tidF = (yIndex+1) * widthstep + (xIndex);
const int tidG = (yIndex+1) * widthstep + (xIndex-1);
const int tidH = (yIndex+1) * widthstep + (xIndex+1);*/
if(input[tid]>100)
outputA[tid]=255;
else
outputA[tid]=0;
__syncthreads();
if((outputA[tidA]>=100)&&(outputA[tidB]>=100)&&(outputA[tidC]>=100)&&(outputA[tidD]>=100))
outputB[tid]=255;
else
outputB[tid]=0;
__syncthreads();
if((outputB[tidA]>=100)||(outputB[tidB]>=100)||(outputB[tidC]>=100)||(outputB[tidD]>=100))
output[tid]=0;
else
output[tid]=255;
}
}
void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
const int Bytes = input.step * input.rows;
unsigned char *d_input, *d_output, *d_outputA, *d_outputB;
cudaMalloc<unsigned char>(&d_input,Bytes);
cudaMalloc<unsigned char>(&d_output,Bytes);
cudaMalloc<unsigned char>(&d_outputA,Bytes);
cudaMalloc<unsigned char>(&d_outputB,Bytes);
cudaMemcpy(d_input,input.ptr(),Bytes,cudaMemcpyHostToDevice);
const dim3 block(16,16);
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
bgr_to_gray_kernel<<<grid,block>>>(d_input,d_output,d_outputA,d_outputB,input.cols,input.rows,input.step);
cudaDeviceSynchronize();
cudaMemcpy(output.ptr(),d_output,Bytes,cudaMemcpyDeviceToHost);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_outputA);
cudaFree(d_outputB);
}
int main()
{
std::string imagePath = "image.jpg";
cv::Mat Input = cv::imread(imagePath,CV_LOAD_IMAGE_GRAYSCALE);
cv::Mat Output(Input.rows,Input.cols,CV_8U);
if(Input.empty())
{
std::cout<<"Image Not Found!"<<std::endl;
std::cin.get();
return -1;
}
convert_to_gray(Input,Output);
cv::imshow("Input",Input);
cv::imshow("Output",Output);
cv::waitKey();
return 0;
}
|
41bd618a5eea089164e73cd6b85a9dea52ec9f3c.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgeisai_32.cu, normal z -> d, Sun Nov 20 20:20:44 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000)
__device__
void dtrsv_lower_32kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB[ 2 ];
double rA[ 2 ];
int n;
int k;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (k = 0; k < N; k++)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn > k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void dtrsv_upper_32kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB[ 2 ];
double rA[ 2 ];
int n;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (int k = N-1; k > -1; k--)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void dtrsv_lower_32kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 1; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 2; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 3; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 4; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 5; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 6; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 7; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 8; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 9; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 10; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 11; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 12; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 13; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 14; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 15; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 16; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_17(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 17; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_18(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 18; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_19(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 19; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_20(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 20; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_21(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 21; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_22(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 22; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_23(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 23; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_24(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 24; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_25(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 25; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_26(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 26; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_27(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 27; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_28(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 28; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_29(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 29; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_30(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 30; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_31(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 31; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_32(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
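// Dispatcher: one thread block (a single warp of WARP_SIZE threads) handles one
// triangular system. Sizes 1..32 select the fully unrolled forward-substitution
// kernel of matching trip count above; larger systems fall back to the general
// register-blocked variant.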
__global__
void dtrsv_lower_32kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
dtrsv_lower_32kernel_1( dA, dB ); break;
case 2:
dtrsv_lower_32kernel_2( dA, dB ); break;
case 3:
dtrsv_lower_32kernel_3( dA, dB ); break;
case 4:
dtrsv_lower_32kernel_4( dA, dB ); break;
case 5:
dtrsv_lower_32kernel_5( dA, dB ); break;
case 6:
dtrsv_lower_32kernel_6( dA, dB ); break;
case 7:
dtrsv_lower_32kernel_7( dA, dB ); break;
case 8:
dtrsv_lower_32kernel_8( dA, dB ); break;
case 9:
dtrsv_lower_32kernel_9( dA, dB ); break;
case 10:
dtrsv_lower_32kernel_10( dA, dB ); break;
case 11:
dtrsv_lower_32kernel_11( dA, dB ); break;
case 12:
dtrsv_lower_32kernel_12( dA, dB ); break;
case 13:
dtrsv_lower_32kernel_13( dA, dB ); break;
case 14:
dtrsv_lower_32kernel_14( dA, dB ); break;
case 15:
dtrsv_lower_32kernel_15( dA, dB ); break;
case 16:
dtrsv_lower_32kernel_16( dA, dB ); break;
case 17:
dtrsv_lower_32kernel_17( dA, dB ); break;
case 18:
dtrsv_lower_32kernel_18( dA, dB ); break;
case 19:
dtrsv_lower_32kernel_19( dA, dB ); break;
case 20:
dtrsv_lower_32kernel_20( dA, dB ); break;
case 21:
dtrsv_lower_32kernel_21( dA, dB ); break;
case 22:
dtrsv_lower_32kernel_22( dA, dB ); break;
case 23:
dtrsv_lower_32kernel_23( dA, dB ); break;
case 24:
dtrsv_lower_32kernel_24( dA, dB ); break;
case 25:
dtrsv_lower_32kernel_25( dA, dB ); break;
case 26:
dtrsv_lower_32kernel_26( dA, dB ); break;
case 27:
dtrsv_lower_32kernel_27( dA, dB ); break;
case 28:
dtrsv_lower_32kernel_28( dA, dB ); break;
case 29:
dtrsv_lower_32kernel_29( dA, dB ); break;
case 30:
dtrsv_lower_32kernel_30( dA, dB ); break;
case 31:
dtrsv_lower_32kernel_31( dA, dB ); break;
case 32:
dtrsv_lower_32kernel_32( dA, dB ); break;
default:
dtrsv_lower_32kernel_general( dA, dB, sizes ); break;
}
}
}
__device__
void dtrsv_upper_32kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 1-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 2-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 3-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 4-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 5-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 6-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 7-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 8-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 9-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 10-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 11-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 12-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 13-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 14-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 15-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 16-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_17(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 17-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_18(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 18-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_19(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 19-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_20(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 20-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_21(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 21-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_22(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 22-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_23(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 23-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_24(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 24-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_25(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 25-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_26(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 26-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_27(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 27-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_28(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 28-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_29(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 29-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_30(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 30-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_31(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 31-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_32(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
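// Same dispatch scheme as dtrsv_lower_32kernel_switch above, but selecting the
// backward-substitution (upper-triangular) solvers.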
__global__
void dtrsv_upper_32kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
dtrsv_upper_32kernel_1( dA, dB ); break;
case 2:
dtrsv_upper_32kernel_2( dA, dB ); break;
case 3:
dtrsv_upper_32kernel_3( dA, dB ); break;
case 4:
dtrsv_upper_32kernel_4( dA, dB ); break;
case 5:
dtrsv_upper_32kernel_5( dA, dB ); break;
case 6:
dtrsv_upper_32kernel_6( dA, dB ); break;
case 7:
dtrsv_upper_32kernel_7( dA, dB ); break;
case 8:
dtrsv_upper_32kernel_8( dA, dB ); break;
case 9:
dtrsv_upper_32kernel_9( dA, dB ); break;
case 10:
dtrsv_upper_32kernel_10( dA, dB ); break;
case 11:
dtrsv_upper_32kernel_11( dA, dB ); break;
case 12:
dtrsv_upper_32kernel_12( dA, dB ); break;
case 13:
dtrsv_upper_32kernel_13( dA, dB ); break;
case 14:
dtrsv_upper_32kernel_14( dA, dB ); break;
case 15:
dtrsv_upper_32kernel_15( dA, dB ); break;
case 16:
dtrsv_upper_32kernel_16( dA, dB ); break;
case 17:
dtrsv_upper_32kernel_17( dA, dB ); break;
case 18:
dtrsv_upper_32kernel_18( dA, dB ); break;
case 19:
dtrsv_upper_32kernel_19( dA, dB ); break;
case 20:
dtrsv_upper_32kernel_20( dA, dB ); break;
case 21:
dtrsv_upper_32kernel_21( dA, dB ); break;
case 22:
dtrsv_upper_32kernel_22( dA, dB ); break;
case 23:
dtrsv_upper_32kernel_23( dA, dB ); break;
case 24:
dtrsv_upper_32kernel_24( dA, dB ); break;
case 25:
dtrsv_upper_32kernel_25( dA, dB ); break;
case 26:
dtrsv_upper_32kernel_26( dA, dB ); break;
case 27:
dtrsv_upper_32kernel_27( dA, dB ); break;
case 28:
dtrsv_upper_32kernel_28( dA, dB ); break;
case 29:
dtrsv_upper_32kernel_29( dA, dB ); break;
case 30:
dtrsv_upper_32kernel_30( dA, dB ); break;
case 31:
dtrsv_upper_32kernel_31( dA, dB ); break;
case 32:
dtrsv_upper_32kernel_32( dA, dB ); break;
default:
dtrsv_upper_32kernel_general( dA, dB, sizes ); break;
}
}
}
// initialize arrays with zero
__global__ void
magma_dgpumemzero_32kernel(
double * d,
int n,
int dim_x,
int dim_y )
{
int i = blockIdx.y * gridDim.x + blockIdx.x;
int idx = threadIdx.x;
if( i >= n ){
return;
}
if( idx >= dim_x ){
return;
}
for( int j=0; j<dim_y; j++)
d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_D_MAKE( 0.0, 0.0 );
}
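// The *_locations_* kernels below record, for each row j of M, the number of pattern
// entries (sizes[j]) and their column indices (locations[j*WARP_SIZE + i]), and set
// the unit entry of the local right-hand side: the first slot for the lower-triangular
// case and the last slot for the upper-triangular case.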
__global__ void
magma_dlocations_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_dlocations_trunc_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 32 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ];
}
}// kernel
__global__ void
magma_dlocations_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_dlocations_trunc_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 32 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_dfilltrisystems_32kernel(
magma_int_t offset,
magma_int_t limit,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset;
int ii = (blockDim.x * blockIdx.x + threadIdx.x);
if ( ii>=limit ){
return;
}
//if ( i<offset ){
// return;
//}
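    // For every pattern entry j of row i, merge-walk the corresponding sparse row of L
    // (index k) against the local pattern (index l) and scatter matching values into the
    // dense WARP_SIZE x WARP_SIZE system; pattern positions without an entry in L stay zero.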
for( int j=0; j<sizes[ i ]; j++ ){// no need for first
int k = row[ locations[ j+i*WARP_SIZE ] ];
int l = i*WARP_SIZE;
int idx = 0;
while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == col[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ]
= val[ k ];
k++;
l++;
idx++;
} else if( col[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
// printf("increment l\n");
                l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}// kernel
__global__ void
magma_dbackinsert_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
int end = sizes[j];
if( j >= n ){
return;
}
if ( i>=end ){
return;
}
val[row[j]+i] = rhs[j*WARP_SIZE+i];
}// kernel
// try to do everything in shared memory and registers!
//one thread block per row of A
__global__ void
magma_dlowertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
double *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA;
__shared__ double dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_D_ZERO;
}
/*
    // for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j<size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
                l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
            l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_duppertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
double *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA;
__shared__ double dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_D_ZERO;
}
/*
    // for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j < size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
                l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
            l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == size-1) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_dlowertrisystems_32kernel(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
double *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA;
double dA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
    // for debugging: let thread 0 do everything
//if(tid==0){
{
// first: generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
if( tid == idx ){
dA[ j ] = Aval[ k ];
}
//__syncthreads();
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
                    l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
// not sure whether we need this here....
//__syncthreads();
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[ k ];
if (k%WARP_SIZE == tid)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
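// Fused upper-triangular variant: reads the pattern directly from M (Mrow/Mcol) instead
// of the sizes/locations workspace, keeps each thread's row of the local dense system in
// registers (rA[32]), and writes the solved entries straight into Mval; no shared memory
// or batched trisystems buffer is needed.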
__global__ void
magma_duppertrisystems_32kernel(
magma_int_t n,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
int size = mlim - mstart;
if( tid >= size ){
return;
}
// set rA to 0
for( int j=0; j<32; j++ ){
rA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int t = Mcol[ mstart + j ];
int k = Arow[ t ];
int l = mstart;
int idx = 0;
while( k < Arow[ t+1 ] && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
if( tid == idx ){
rA[ j ] = Aval[ k ];
}
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
                l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
// second: solve the triangular systems - in registers
    // we know what the RHS looks like
rB = (tid == size-1) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
if (k%32 == tid)
rB /= rA[k];
double bottom = __shfl(rB, k%32);
if ( tid < k)
rB -= (bottom*rA[k]);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}// kernel
#endif // CUDA >= 7000
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_d_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_d_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_index_t*
Number of Elements that are replaced.
@param[out]
locations magma_index_t*
Array indicating the locations.
@param[out]
trisystems double*
scratch space holding the batched triangular systems
@param[out]
rhs double*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_disaigenerator_32_gpu(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_d_matrix L,
magma_d_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// routine 1
int r1bs1 = WARP_SIZE;
int r1bs2 = 1;
int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
dim3 r1block( r1bs1, r1bs2, 1 );
dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = WARP_SIZE;
int r2bs2 = 1;
int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 );
int r2dg2 = 1;
int r2dg3 = 1;
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
int r3bs1 = WARP_SIZE;
int r3bs2 = 1;
int r3dg1 = magma_ceildiv( 32000, r2bs1 );
int r3dg2 = 1;
int r3dg3 = 1;
dim3 r3block( r3bs1, r3bs2, 1 );
dim3 r3grid( r3dg1, r3dg2, r3dg3 );
int recursive = magma_ceildiv( M->num_rows, 32000 );
if (arch >= 300) {
hipLaunchKernelGGL(( magma_dgpumemzero_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
rhs, L.num_rows, WARP_SIZE, 1);
if (uplotype == MagmaLower) {
hipLaunchKernelGGL(( magma_dlocations_lower_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
else {
hipLaunchKernelGGL(( magma_dlocations_upper_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
/*
if (uplotype == MagmaLower) {
printf("in here lower\n");
magma_dlowertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval,
sizes,
locations );
}
else {
printf("in here upper\n");
magma_duppertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval );
}
*/
// chunk it recursively into batches of 32000 rows
for (int z=0; z < recursive; z++) {
int limit = min(32000, L.num_rows-32000*z);
hipLaunchKernelGGL(( magma_dgpumemzero_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
trisystems, limit, WARP_SIZE, WARP_SIZE );
hipLaunchKernelGGL(( magma_dfilltrisystems_32kernel), dim3(r3grid), dim3(r3block), 0, queue->cuda_stream() ,
32000*z,
limit,
L.drow,
L.dcol,
L.dval,
sizes,
locations,
trisystems,
rhs );
// routine 2
if (uplotype == MagmaLower) {
hipLaunchKernelGGL(( dtrsv_lower_32kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
else {
hipLaunchKernelGGL(( dtrsv_upper_32kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
}
// routine 3
hipLaunchKernelGGL(( magma_dbackinsert_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
rhs );
}
else {
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
| 41bd618a5eea089164e73cd6b85a9dea52ec9f3c.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgeisai_32.cu, normal z -> d, Sun Nov 20 20:20:44 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000)
__device__
void dtrsv_lower_32kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB[ 2 ];
double rA[ 2 ];
int n;
int k;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (k = 0; k < N; k++)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn > k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void dtrsv_upper_32kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB[ 2 ];
double rA[ 2 ];
int n;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (int k = N-1; k > -1; k--)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
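/*
    Illustrative sketch (not part of MAGMA): each dtrsv_*_32kernel routine in
    this file solves one small dense triangular system per warp (order
    N <= 32 in the unrolled variants). The system is stored column-major with
    leading dimension WARP_SIZE, each lane keeps one row of the system in a
    register (the general fallbacks above keep two), and the unknown computed
    in step k is broadcast to the other lanes with a warp shuffle. Assuming
    CUDA 9+ (this file uses the legacy __shfl; __shfl_sync is its modern
    replacement), the same forward substitution for a single system could
    look like this:

    __global__ void example_warp_trsv_lower(const double *A, double *b, int n)
    {
        int lane = threadIdx.x;                  // launched as <<< 1, 32 >>>
        double x = (lane < n) ? b[lane] : 0.0;   // lane i holds b(i)
        for (int k = 0; k < n; k++) {
            double a = (lane < n) ? A[k*32 + lane] : 0.0;  // A(lane, k), column-major, ld = 32
            if (lane == k)
                x /= a;                                    // divide by the diagonal A(k,k)
            double xk = __shfl_sync(0xffffffffu, x, k);    // broadcast x(k) to the whole warp
            if (lane > k)
                x -= xk * a;                               // eliminate column k from the rows below
        }
        if (lane < n)
            b[lane] = x;                                   // the solution overwrites b
    }
*/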
__device__
void dtrsv_lower_32kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 1; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 2; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 3; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 4; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 5; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 6; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 7; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 8; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 9; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 10; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 11; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 12; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 13; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 14; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 15; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 16; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_17(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 17; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_18(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 18; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_19(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 19; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_20(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 20; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_21(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 21; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_22(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 22; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_23(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 23; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_24(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 24; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_25(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 25; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_26(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 26; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_27(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 27; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_28(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 28; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_29(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 29; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_30(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 30; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_31(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 31; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_32kernel_32(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__global__
void dtrsv_lower_32kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
dtrsv_lower_32kernel_1( dA, dB ); break;
case 2:
dtrsv_lower_32kernel_2( dA, dB ); break;
case 3:
dtrsv_lower_32kernel_3( dA, dB ); break;
case 4:
dtrsv_lower_32kernel_4( dA, dB ); break;
case 5:
dtrsv_lower_32kernel_5( dA, dB ); break;
case 6:
dtrsv_lower_32kernel_6( dA, dB ); break;
case 7:
dtrsv_lower_32kernel_7( dA, dB ); break;
case 8:
dtrsv_lower_32kernel_8( dA, dB ); break;
case 9:
dtrsv_lower_32kernel_9( dA, dB ); break;
case 10:
dtrsv_lower_32kernel_10( dA, dB ); break;
case 11:
dtrsv_lower_32kernel_11( dA, dB ); break;
case 12:
dtrsv_lower_32kernel_12( dA, dB ); break;
case 13:
dtrsv_lower_32kernel_13( dA, dB ); break;
case 14:
dtrsv_lower_32kernel_14( dA, dB ); break;
case 15:
dtrsv_lower_32kernel_15( dA, dB ); break;
case 16:
dtrsv_lower_32kernel_16( dA, dB ); break;
case 17:
dtrsv_lower_32kernel_17( dA, dB ); break;
case 18:
dtrsv_lower_32kernel_18( dA, dB ); break;
case 19:
dtrsv_lower_32kernel_19( dA, dB ); break;
case 20:
dtrsv_lower_32kernel_20( dA, dB ); break;
case 21:
dtrsv_lower_32kernel_21( dA, dB ); break;
case 22:
dtrsv_lower_32kernel_22( dA, dB ); break;
case 23:
dtrsv_lower_32kernel_23( dA, dB ); break;
case 24:
dtrsv_lower_32kernel_24( dA, dB ); break;
case 25:
dtrsv_lower_32kernel_25( dA, dB ); break;
case 26:
dtrsv_lower_32kernel_26( dA, dB ); break;
case 27:
dtrsv_lower_32kernel_27( dA, dB ); break;
case 28:
dtrsv_lower_32kernel_28( dA, dB ); break;
case 29:
dtrsv_lower_32kernel_29( dA, dB ); break;
case 30:
dtrsv_lower_32kernel_30( dA, dB ); break;
case 31:
dtrsv_lower_32kernel_31( dA, dB ); break;
case 32:
dtrsv_lower_32kernel_32( dA, dB ); break;
default:
dtrsv_lower_32kernel_general( dA, dB, sizes ); break;
}
}
}
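/*
    Dispatch note: sizes[j] is the order of the dense triangular system owned
    by block j. Each case runs a solver whose elimination loop is unrolled for
    exactly N steps; anything else falls back to the general variant, which
    keeps two values per lane instead of one.
*/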
__device__
void dtrsv_upper_32kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 1-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 2-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 3-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 4-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 5-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 6-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 7-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 8-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 9-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 10-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 11-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 12-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 13-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 14-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 15-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 16-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_17(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 17-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_18(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 18-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_19(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 19-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_20(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 20-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_21(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 21-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_22(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 22-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_23(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 23-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_24(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 24-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_25(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 25-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_26(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 26-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_27(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 27-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_28(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 28-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_29(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 29-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_30(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 30-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_31(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 31-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_32kernel_32(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__global__
void dtrsv_upper_32kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
dtrsv_upper_32kernel_1( dA, dB ); break;
case 2:
dtrsv_upper_32kernel_2( dA, dB ); break;
case 3:
dtrsv_upper_32kernel_3( dA, dB ); break;
case 4:
dtrsv_upper_32kernel_4( dA, dB ); break;
case 5:
dtrsv_upper_32kernel_5( dA, dB ); break;
case 6:
dtrsv_upper_32kernel_6( dA, dB ); break;
case 7:
dtrsv_upper_32kernel_7( dA, dB ); break;
case 8:
dtrsv_upper_32kernel_8( dA, dB ); break;
case 9:
dtrsv_upper_32kernel_9( dA, dB ); break;
case 10:
dtrsv_upper_32kernel_10( dA, dB ); break;
case 11:
dtrsv_upper_32kernel_11( dA, dB ); break;
case 12:
dtrsv_upper_32kernel_12( dA, dB ); break;
case 13:
dtrsv_upper_32kernel_13( dA, dB ); break;
case 14:
dtrsv_upper_32kernel_14( dA, dB ); break;
case 15:
dtrsv_upper_32kernel_15( dA, dB ); break;
case 16:
dtrsv_upper_32kernel_16( dA, dB ); break;
case 17:
dtrsv_upper_32kernel_17( dA, dB ); break;
case 18:
dtrsv_upper_32kernel_18( dA, dB ); break;
case 19:
dtrsv_upper_32kernel_19( dA, dB ); break;
case 20:
dtrsv_upper_32kernel_20( dA, dB ); break;
case 21:
dtrsv_upper_32kernel_21( dA, dB ); break;
case 22:
dtrsv_upper_32kernel_22( dA, dB ); break;
case 23:
dtrsv_upper_32kernel_23( dA, dB ); break;
case 24:
dtrsv_upper_32kernel_24( dA, dB ); break;
case 25:
dtrsv_upper_32kernel_25( dA, dB ); break;
case 26:
dtrsv_upper_32kernel_26( dA, dB ); break;
case 27:
dtrsv_upper_32kernel_27( dA, dB ); break;
case 28:
dtrsv_upper_32kernel_28( dA, dB ); break;
case 29:
dtrsv_upper_32kernel_29( dA, dB ); break;
case 30:
dtrsv_upper_32kernel_30( dA, dB ); break;
case 31:
dtrsv_upper_32kernel_31( dA, dB ); break;
case 32:
dtrsv_upper_32kernel_32( dA, dB ); break;
default:
dtrsv_upper_32kernel_general( dA, dB, sizes ); break;
}
}
}
// initialize arrays with zero
__global__ void
magma_dgpumemzero_32kernel(
double * d,
int n,
int dim_x,
int dim_y )
{
int i = blockIdx.y * gridDim.x + blockIdx.x;
int idx = threadIdx.x;
if( i >= n ){
return;
}
if( idx >= dim_x ){
return;
}
for( int j=0; j<dim_y; j++)
d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_D_MAKE( 0.0, 0.0 );
}
__global__ void
magma_dlocations_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
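/*
    Per row j this records everything needed to set up its small triangular
    system: sizes[j] is the number of nonzeros in row j of M (the order of the
    system), locations[j*WARP_SIZE + 0..count-1] holds the column indices of
    those nonzeros, and the right-hand side becomes a unit vector -- e_1 here
    for the lower factor, e_count in the upper variant further down. Example:
    a row with pattern {0, 3, 7} gives sizes[j] = 3,
    locations[j*32 + {0,1,2}] = {0, 3, 7} and rhs[j*32] = MAGMA_D_ONE.
*/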
__global__ void
magma_dlocations_trunc_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 32 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ];
}
}// kernel
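/*
    Truncation note: if a row carries more than BLOCKSIZE nonzeros, only the
    last 32 entries of the row -- the columns closest to the main diagonal of
    the lower factor -- are kept, so every system still fits into one warp;
    the truncated upper variant below keeps the first 32 entries instead.
*/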
__global__ void
magma_dlocations_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_dlocations_trunc_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 32 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_dfilltrisystems_32kernel(
magma_int_t offset,
magma_int_t limit,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset;
int ii = (blockDim.x * blockIdx.x + threadIdx.x);
if ( ii>=limit ){
return;
}
//if ( i<offset ){
// return;
//}
for( int j=0; j<sizes[ i ]; j++ ){// no need for first
int k = row[ locations[ j+i*WARP_SIZE ] ];
int l = i*WARP_SIZE;
int idx = 0;
while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == col[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ]
= val[ k ];
k++;
l++;
idx++;
} else if( col[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
// printf("increment l\n");
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}// kernel
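/*
    The gather loop above is a two-pointer merge of two sorted index lists:
    the column indices of one row of the factor (advanced with k) and the
    sparsity pattern of the current ISAI row (advanced with l/idx). Matching
    indices copy a value into the dense system; pattern entries that are
    missing from the factor row simply stay zero. A stand-alone host sketch of
    the same idea (illustrative only, not MAGMA code; 'dense' is assumed to be
    pre-zeroed):

    void gather_into_pattern(const int *cols, const double *vals, int nnz,
                             const int *pattern, int pattern_len, double *dense)
    {
        int k = 0, l = 0;
        while (k < nnz && l < pattern_len) {
            if (cols[k] == pattern[l]) { dense[l] = vals[k]; k++; l++; } // match
            else if (cols[k] < pattern[l]) { k++; }  // column not part of the pattern
            else { l++; }                            // pattern entry missing in the row: stays zero
        }
    }
*/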
__global__ void
magma_dbackinsert_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
int end = sizes[j];
if( j >= n ){
return;
}
if ( i>=end ){
return;
}
val[row[j]+i] = rhs[j*WARP_SIZE+i];
}// kernel
// try to do everything in shared memory and registers!
// one thread block per row of A
__global__ void
magma_dlowertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
double *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA;
__shared__ double dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_D_ZERO;
}
/*
// for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j<size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
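/*
    The "_s" variants stage the dense system in shared memory (32*32 doubles,
    i.e. 8 KB per thread block) so that any lane can read any entry during the
    gather, whereas the register variants further down keep one row of the
    system strictly in per-thread registers and rely on warp shuffles alone.
*/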
__global__ void
magma_duppertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
double *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA;
__shared__ double dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_D_ZERO;
}
/*
// for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j < size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == size-1) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_dlowertrisystems_32kernel(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
double *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA;
double dA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// for debugging: let thread 0 do everything
//if(tid==0){
{
// first: generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
if( tid == idx ){
dA[ j ] = Aval[ k ];
}
//__syncthreads();
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
// not sure whether we need this here....
//__syncthreads();
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[ k ];
if (k%WARP_SIZE == tid)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_duppertrisystems_32kernel(
magma_int_t n,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
double rB; // registers for trsv
double rA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
int size = mlim - mstart;
if( tid >= size ){
return;
}
// set rA to 0
for( int j=0; j<32; j++ ){
rA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int t = Mcol[ mstart + j ];
int k = Arow[ t ];
int l = mstart;
int idx = 0;
while( k < Arow[ t+1 ] && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
if( tid == idx ){
rA[ j ] = Aval[ k ];
}
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = (tid == size-1) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
if (k%32 == tid)
rB /= rA[k];
double bottom = __shfl(rB, k%32);
if ( tid < k)
rB -= (bottom*rA[k]);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}// kernel
#endif // CUDA >= 7000
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_d_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_d_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems double*
trisystems
@param[out]
rhs double*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_disaigenerator_32_gpu(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_d_matrix L,
magma_d_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// routine 1
int r1bs1 = WARP_SIZE;
int r1bs2 = 1;
int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
dim3 r1block( r1bs1, r1bs2, 1 );
dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = WARP_SIZE;
int r2bs2 = 1;
int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 );
int r2dg2 = 1;
int r2dg3 = 1;
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
int r3bs1 = WARP_SIZE;
int r3bs2 = 1;
int r3dg1 = magma_ceildiv( 32000, r2bs1 );
int r3dg2 = 1;
int r3dg3 = 1;
dim3 r3block( r3bs1, r3bs2, 1 );
dim3 r3grid( r3dg1, r3dg2, r3dg3 );
int recursive = magma_ceildiv( M->num_rows, 32000 );
if (arch >= 300) {
magma_dgpumemzero_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
rhs, L.num_rows, WARP_SIZE, 1);
if (uplotype == MagmaLower) {
magma_dlocations_lower_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
else {
magma_dlocations_upper_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
/*
if (uplotype == MagmaLower) {
printf("in here lower\n");
magma_dlowertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval,
sizes,
locations );
}
else {
printf("in here upper\n");
magma_duppertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval );
}
*/
// chunk it recursively into batches of 32000
for (int z=0; z < recursive; z++) {
int limit = min(32000, L.num_rows-32000*z);
magma_dgpumemzero_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems, limit, WARP_SIZE, WARP_SIZE );
magma_dfilltrisystems_32kernel<<< r3grid, r3block, 0, queue->cuda_stream() >>>(
32000*z,
limit,
L.drow,
L.dcol,
L.dval,
sizes,
locations,
trisystems,
rhs );
// routine 2
if (uplotype == MagmaLower) {
dtrsv_lower_32kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
else {
dtrsv_upper_32kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
}
// routine 3
magma_dbackinsert_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
rhs );
}
else {
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
|
2afb5f33406aad0a126f8d7dd121b7840a479773.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
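// A minimal CPU reference sketch (not part of the original assignment code)
// for the four steps listed above, using the worked example from the comment
// block (3 bins). The helper name cpu_reference_cdf and the clamping of the
// top bin are illustrative assumptions; the GPU kernels below compute the
// exclusive-scan variant of the cdf ([0 4 11]).
#include <vector>
#include <algorithm>
#include <cstdio>
static void cpu_reference_cdf()
{
  const std::vector<float> lum = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
  const int numBins = 3;
  // 1) min / max
  const float lumMin = *std::min_element(lum.begin(), lum.end()); // 0
  const float lumMax = *std::max_element(lum.begin(), lum.end()); // 9
  // 2) range
  const float lumRange = lumMax - lumMin; // 9
  // 3) histogram: bin = (lum[i] - lumMin) / lumRange * numBins
  std::vector<unsigned int> histo(numBins, 0);
  for (float v : lum) {
    int bin = std::min(numBins - 1, (int)((v - lumMin) / lumRange * numBins));
    histo[bin]++; // -> [4 7 3]
  }
  // 4) inclusive prefix sum, as in the example above -> [4 11 14]
  unsigned int running = 0;
  for (int b = 0; b < numBins; b++) {
    running += histo[b];
    printf("cdf[%d] = %u\n", b, running);
  }
}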
#include "utils.h"
__global__ void reduce_max_min(const float* const d_in, float* d_out, bool is_max=true)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
partial[tid] = d_in[idx];
// make sure all data in this block has loaded into shared memory
__syncthreads();
for(unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1){
if(tid < stride){
if(is_max)
partial[tid] = max(partial[tid], partial[tid+stride]);
else
partial[tid] = min(partial[tid], partial[tid+stride]);
}
// make sure all operations at one stage are done!
__syncthreads();
}
if(tid == 0)
d_out[blockIdx.x] = partial[tid];
}
void reduce(const float* const d_in,float &min_logLum,float &max_logLum,const size_t numRows,const size_t numCols)
{
const int BLOCK_SIZE = numCols;
const int GRID_SIZE = numRows;
// declare GPU memory pointers
float * d_intermediate, *d_max, *d_min;
// allocate GPU memory
hipMalloc((void **) &d_intermediate, GRID_SIZE*sizeof(float));
hipMalloc((void **) &d_max, sizeof(float));
hipMalloc((void **) &d_min, sizeof(float));
// find maximum;
// firstly, find the maximum in each block
hipLaunchKernelGGL(( reduce_max_min), dim3(GRID_SIZE),dim3(BLOCK_SIZE), BLOCK_SIZE*sizeof(float), 0, d_in, d_intermediate, true);
// then, find the global maximum
hipLaunchKernelGGL(( reduce_max_min), dim3(1), dim3(GRID_SIZE), GRID_SIZE*sizeof(float), 0, d_intermediate, d_max, true);
checkCudaErrors(hipMemset(d_intermediate,0,GRID_SIZE*sizeof(float)));
// find minimum;
// firstly, find the minimum in each block
hipLaunchKernelGGL(( reduce_max_min), dim3(GRID_SIZE),dim3(BLOCK_SIZE), BLOCK_SIZE*sizeof(float), 0, d_in, d_intermediate,false);
// then, find the global minimum
hipLaunchKernelGGL(( reduce_max_min), dim3(1), dim3(GRID_SIZE), GRID_SIZE*sizeof(float), 0, d_intermediate, d_min, false);
// transfer the output to CPU
checkCudaErrors(hipMemcpy(&max_logLum, d_max, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_logLum, d_min, sizeof(float), hipMemcpyDeviceToHost));
// free GPU memory location
checkCudaErrors(hipFree(d_intermediate));
checkCudaErrors(hipFree(d_max));
checkCudaErrors(hipFree(d_min));
return;
}
__global__ void hist(const float* const d_in, unsigned int * const d_out, const float logLumRange, const float min_logLum, const int numBins)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float num = d_in[idx];
int bin_idx = (num - min_logLum)/logLumRange*numBins;
if(bin_idx >= numBins)
bin_idx--;
atomicAdd(&(d_out[bin_idx]),1);
}
// Hillis Steele Scan
__global__ void prefixSum_HS(const unsigned int * const d_in, unsigned int * const d_out)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
// make sure all data in this block are loaded into shared memory
partial[tid] = d_in[tid];
__syncthreads();
for(unsigned int stride = 1; stride < blockDim.x; stride <<= 1){
if(tid + stride < blockDim.x)
partial[tid+stride] += partial[tid];
// make sure all operations at one stage are done!
__syncthreads();
}
// exclusive scan
if(tid == 0)
d_out[tid] = 0;
else
d_out[tid] = partial[tid-1];
}
// Blelloch Scan
__global__ void prefixSum_BL(const unsigned int * const d_in, unsigned int * const d_out)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
// make sure all data in this block are loaded into shared memory
partial[tid] = d_in[tid];
__syncthreads();
// reduce step
for(unsigned int stride = 1; stride < blockDim.x/2; stride <<= 1){
// first update all idx == 2n-1, then 4n-1, then 8n-1 ...
// finally blockDim.x/2 * n - 1 (only 1 value will be updated: partial[blockDim.x-1])
int idx = (tid+1)*stride*2 - 1;
if( idx < blockDim.x)
partial[idx] += partial[idx-stride];
// make sure all operations at one stage are done!
__syncthreads();
}
// Downsweep Step
// set identity value
if(tid == blockDim.x-1)
partial[tid] = 0;
for(unsigned int stride = blockDim.x/2; stride > 0; stride >>= 1){
if( (tid+1) % (stride*2) == 0){
unsigned int temp = partial[tid-stride];
partial[tid-stride] = partial[tid];
partial[tid] += temp;
}
// make sure all operations at one stage are done!
__syncthreads();
}
d_out[tid] = partial[tid];
}
// Scan algorithm from Course: Heterogeneous Parallel Programming
__global__ void prefixSum_HPP(const unsigned int * const d_in, unsigned int * const d_out)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
// make sure all data in this block are loaded into shared memory
partial[tid] = d_in[tid];
__syncthreads();
// Reduction Phase
for(unsigned int stride = 1; stride < blockDim.x/2; stride <<= 1){
// first update all idx == 2n-1, then 4n-1, then 8n-1 ...
// finally 2(blockDim.x/2) * n - 1 (only 1 value will be updated: partial[blockDim.x-1])
int idx = (tid+1)*stride*2 - 1;
if( idx < blockDim.x)
partial[idx] += partial[idx-stride];
// make sure all operations at one stage are done!
__syncthreads();
}
// Example:
// After the reduction phase, positions 0, 1, 3, 7, ... have their final values (blockDim.x == 8)
// then we update values reversely.
// first use position 3's value to update position 5(stride == 2 == blockDim.x/4, idx == 3 == (0+1)*2*2-1, only 1 thread do calculation)
// then use position 1 to update position 2, position 3 to update position 4, position 5 to update position 6
// (stride == 1 == blockDim.x/8, idx == (0+1)*1*2-1=1,(1+1)*1*2-1=3, (2+1)*1*2-1=5, 3 threads do calculation)
// Post Reduction Reverse Phase
for(unsigned int stride = blockDim.x/4; stride > 0; stride >>= 1){
// first update all idx == 2(blockDim.x/4) * n - 1 + blockDim.x/4,
// then 2(blockDim.x/8)n-1+blockDim.x/8, then 2(blockDim.x/16)n-1 + blockDim.x/16...
// finally 2 * n - 1
int idx = (tid+1)*stride*2 - 1;
if( idx + stride < blockDim.x)
partial[idx + stride] += partial[idx];
// make sure all operations at one stage are done!
__syncthreads();
}
// exclusive scan
if(tid == 0)
d_out[tid] = 0;
else
d_out[tid] = partial[tid-1];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// Step 1 : find minimum and maximum value
reduce(d_logLuminance, min_logLum, max_logLum, numRows, numCols);
// Step 2: find the range
float logLumRange = max_logLum - min_logLum;
// Step 3 : generate a histogram of all the values
// declare GPU memory pointers
unsigned int *d_bins;
// allocate GPU memory
checkCudaErrors(hipMalloc((void **) &d_bins, numBins*sizeof(unsigned int)));
checkCudaErrors(hipMemset(d_bins,0,numBins*sizeof(unsigned int)));
hipLaunchKernelGGL(( hist), dim3(numRows), dim3(numCols), 0, 0, d_logLuminance, d_bins, logLumRange, min_logLum, numBins);
// Step 4 : prefix sum
hipLaunchKernelGGL(( prefixSum_BL), dim3(1), dim3(numBins), numBins*sizeof(unsigned int), 0, d_bins, d_cdf);
// free GPU memory allocation
checkCudaErrors(hipFree(d_bins));
}
| 2afb5f33406aad0a126f8d7dd121b7840a479773.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
__global__ void reduce_max_min(const float* const d_in, float* d_out, bool is_max=true)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
partial[tid] = d_in[idx];
// make sure all data in this block has loaded into shared memory
__syncthreads();
for(unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1){
if(tid < stride){
if(is_max)
partial[tid] = max(partial[tid], partial[tid+stride]);
else
partial[tid] = min(partial[tid], partial[tid+stride]);
}
// make sure all operations at one stage are done!
__syncthreads();
}
if(tid == 0)
d_out[blockIdx.x] = partial[tid];
}
void reduce(const float* const d_in,float &min_logLum,float &max_logLum,const size_t numRows,const size_t numCols)
{
const int BLOCK_SIZE = numCols;
const int GRID_SIZE = numRows;
// declare GPU memory pointers
float * d_intermediate, *d_max, *d_min;
// allocate GPU memory
cudaMalloc((void **) &d_intermediate, GRID_SIZE*sizeof(float));
cudaMalloc((void **) &d_max, sizeof(float));
cudaMalloc((void **) &d_min, sizeof(float));
// find maximum;
// firstly, find the maximum in each block
reduce_max_min<<<GRID_SIZE,BLOCK_SIZE, BLOCK_SIZE*sizeof(float)>>>(d_in, d_intermediate, true);
// then, find the global maximum
reduce_max_min<<<1, GRID_SIZE, GRID_SIZE*sizeof(float)>>>(d_intermediate, d_max, true);
checkCudaErrors(cudaMemset(d_intermediate,0,GRID_SIZE*sizeof(float)));
// find minimum;
// firstly, find the minimum in each block
reduce_max_min<<<GRID_SIZE,BLOCK_SIZE, BLOCK_SIZE*sizeof(float)>>>(d_in, d_intermediate,false);
// then, find the global minimum
reduce_max_min<<<1, GRID_SIZE, GRID_SIZE*sizeof(float)>>>(d_intermediate, d_min, false);
// transfer the output to CPU
checkCudaErrors(cudaMemcpy(&max_logLum, d_max, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_logLum, d_min, sizeof(float), cudaMemcpyDeviceToHost));
// free GPU memory location
checkCudaErrors(cudaFree(d_intermediate));
checkCudaErrors(cudaFree(d_max));
checkCudaErrors(cudaFree(d_min));
return;
}
__global__ void hist(const float* const d_in, unsigned int * const d_out, const float logLumRange, const float min_logLum, const int numBins)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float num = d_in[idx];
int bin_idx = (num - min_logLum)/logLumRange*numBins;
if(bin_idx >= numBins)
bin_idx--;
atomicAdd(&(d_out[bin_idx]),1);
}
// Hillis Steele Scan
__global__ void prefixSum_HS(const unsigned int * const d_in, unsigned int * const d_out)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
// make sure all data in this block are loaded into shared memory
partial[tid] = d_in[tid];
__syncthreads();
for(unsigned int stride = 1; stride < blockDim.x; stride <<= 1){
if(tid + stride < blockDim.x)
partial[tid+stride] += partial[tid];
// make sure all operations at one stage are done!
__syncthreads();
}
// exclusive scan
if(tid == 0)
d_out[tid] = 0;
else
d_out[tid] = partial[tid-1];
}
// Blelloch Scan
__global__ void prefixSum_BL(const unsigned int * const d_in, unsigned int * const d_out)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
// make sure all data in this block are loaded into shared memory
partial[tid] = d_in[tid];
__syncthreads();
// reduce step
for(unsigned int stride = 1; stride < blockDim.x/2; stride <<= 1){
// first update all idx == 2n-1, then 4n-1, then 8n-1 ...
// finally blockDim.x/2 * n - 1 (only 1 value will be updated: partial[blockDim.x-1])
int idx = (tid+1)*stride*2 - 1;
if( idx < blockDim.x)
partial[idx] += partial[idx-stride];
// make sure all operations at one stage are done!
__syncthreads();
}
// Downsweep Step
// set identity value
if(tid == blockDim.x-1)
partial[tid] = 0;
for(unsigned int stride = blockDim.x/2; stride > 0; stride >>= 1){
if( (tid+1) % (stride*2) == 0){
unsigned int temp = partial[tid-stride];
partial[tid-stride] = partial[tid];
partial[tid] += temp;
}
// make sure all operations at one stage are done!
__syncthreads();
}
d_out[tid] = partial[tid];
}
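// A minimal host-side harness (not part of the original assignment code)
// showing how prefixSum_BL above can be exercised in isolation. The helper
// name test_prefixSum_BL and the sample values are illustrative assumptions;
// with input [4 7 3 0 0 0 0 0] the kernel writes the exclusive scan
// [0 4 11 14 14 14 14 14]. The block size must be a power of two.
#include <cstdio>
static void test_prefixSum_BL()
{
  const int n = 8;
  unsigned int h_in[n] = {4, 7, 3, 0, 0, 0, 0, 0};
  unsigned int h_out[n];
  unsigned int *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(unsigned int));
  cudaMalloc(&d_out, n * sizeof(unsigned int));
  cudaMemcpy(d_in, h_in, n * sizeof(unsigned int), cudaMemcpyHostToDevice);
  // one block of n threads; shared memory sized to match the kernel's
  // `extern __shared__ float partial[]` declaration
  prefixSum_BL<<<1, n, n * sizeof(float)>>>(d_in, d_out);
  cudaMemcpy(h_out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++)
    printf("%u ", h_out[i]); // expected: 0 4 11 14 14 14 14 14
  printf("\n");
  cudaFree(d_in);
  cudaFree(d_out);
}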
// Scan algorithm from Course: Heterogeneous Parallel Programming
__global__ void prefixSum_HPP(const unsigned int * const d_in, unsigned int * const d_out)
{
extern __shared__ float partial[];
int tid = threadIdx.x;
// make sure all data in this block are loaded into shared memory
partial[tid] = d_in[tid];
__syncthreads();
// Reduction Phase
for(unsigned int stride = 1; stride < blockDim.x/2; stride <<= 1){
// first update all idx == 2n-1, then 4n-1, then 8n-1 ...
// finally 2(blockDim.x/2) * n - 1 (only 1 value will be updated: partial[blockDim.x-1])
int idx = (tid+1)*stride*2 - 1;
if( idx < blockDim.x)
partial[idx] += partial[idx-stride];
// make sure all operations at one stage are done!
__syncthreads();
}
// Example:
// After the reduction phase, positions 0, 1, 3, 7, ... have their final values (blockDim.x == 8)
// then we update values reversely.
// first use position 3's value to update position 5(stride == 2 == blockDim.x/4, idx == 3 == (0+1)*2*2-1, only 1 thread do calculation)
// then use position 1 to update position 2, position 3 to update position 4, position 5 to update position 6
// (stride == 1 == blockDim.x/8, idx == (0+1)*1*2-1=1,(1+1)*1*2-1=3, (2+1)*1*2-1=5, 3 threads do calculation)
// Post Reduction Reverse Phase
for(unsigned int stride = blockDim.x/4; stride > 0; stride >>= 1){
// first update all idx == 2(blockDim.x/4) * n - 1 + blockDim.x/4,
// then 2(blockDim.x/8)n-1+blockDim.x/8, then 2(blockDim.x/16)n-1 + blockDim.x/16...
// finally 2 * n - 1
int idx = (tid+1)*stride*2 - 1;
if( idx + stride < blockDim.x)
partial[idx + stride] += partial[idx];
// make sure all operations at one stage are done!
__syncthreads();
}
// exclusive scan
if(tid == 0)
d_out[tid] = 0;
else
d_out[tid] = partial[tid-1];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// Step 1 : find minimum and maximum value
reduce(d_logLuminance, min_logLum, max_logLum, numRows, numCols);
// Step 2: find the range
float logLumRange = max_logLum - min_logLum;
// Step 3 : generate a histogram of all the values
// declare GPU memory pointers
unsigned int *d_bins;
// allocate GPU memory
checkCudaErrors(cudaMalloc((void **) &d_bins, numBins*sizeof(unsigned int)));
checkCudaErrors(cudaMemset(d_bins,0,numBins*sizeof(unsigned int)));
hist<<<numRows, numCols>>>(d_logLuminance, d_bins, logLumRange, min_logLum, numBins);
// Step 4 : prefix sum
prefixSum_BL<<<1, numBins, numBins*sizeof(unsigned int)>>>(d_bins, d_cdf);
// free GPU memory allocation
checkCudaErrors(cudaFree(d_bins));
}
|
6a04110c9f0827106ec113c9b20ab43bfd0d250f.hip | // !!! This is a file automatically generated by hipify!!!
/*
Fractal code
Copyright 2022 Martin Burtscher
Redistribution in source or binary form, with or without modification, is not
permitted. Use in source or binary form, with or without modification, is only
permitted for academic use in CS 4380 and CS 5351 at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Worked by:
- Chuong Dinh Vu
- Noah del Angel
*/
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <algorithm>
#include <sys/time.h>
#include "BMP24.h"
static const int ThreadsPerBlock = 512;
static __global__ void fractal(const int frames, const int width, unsigned char* const pic)
{
// Determine the global index this thread handles
const int i = threadIdx.x + blockIdx.x * blockDim.x;
// Calculate the current frame and row
const int col = i % width;
const int row = ( i / width ) % width;
const int frame = i / ( width * width );
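// Worked example (illustrative values, assuming width == 4): for i == 21,
// col = 21 % 4 = 1, row = (21 / 4) % 4 = 1, frame = 21 / 16 = 1.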
const double Delta = 0.004937716;
const double xMid = -0.664689302;
const double yMid = 0.355561972;
// compute pixels of each frame
if( i < frames * ( width * width )){ // frames
const double delta = Delta * (1.5 + cos(2.0 * M_PI * frame / frames));
const double xMin = xMid - delta;
const double yMin = yMid - delta;
const double dw = 2.0 * delta / width;
const double cy = yMin + row * dw;
const double cx = xMin + col * dw;
double x = cx;
double y = cy;
double x2, y2;
int count = 256;
do {
x2 = x * x;
y2 = y * y;
y = 2.0 * x * y + cy;
x = x2 - y2 + cx;
count--;
} while ((count > 0) && ((x2 + y2) < 4.0));
pic[frame * width * width + row * width + col] = (unsigned char)count;
}
}
static void CheckCuda(const int line)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "CUDA error %d on line %d; %s\n", e, line, hipGetErrorString(e));
exit(-1);
}
}
int main(int argc, char* argv [])
{
printf("Fractal v2.3\n");
// check command line
if (argc != 3) {fprintf(stderr, "USAGE: %s frame_width number_of_frames\n", argv[0]); exit(-1);}
const int width = atoi(argv[1]);
if (width < 10) {fprintf(stderr, "ERROR: frame_width must be at least 10\n"); exit(-1);}
const int frames = atoi(argv[2]);
if (frames < 1) {fprintf(stderr, "ERROR: number_of_frames must be at least 1\n"); exit(-1);}
printf("frames: %d\n", frames);
printf("width: %d\n", width);
// allocate picture array
unsigned char* pic = new unsigned char [frames * width * width];
// Allocate picture array on GPU
unsigned char* d_pic;
hipMalloc((void **) &d_pic, sizeof(unsigned char) * frames * width * width );
CheckCuda(__LINE__);
// start time
timeval beg, end;
gettimeofday(&beg, NULL);
// execute timed code
hipLaunchKernelGGL(( fractal), dim3(( (frames * width * width) + ThreadsPerBlock - 1 ) / ThreadsPerBlock) , dim3(ThreadsPerBlock), 0, 0, frames, width, d_pic);
hipDeviceSynchronize(); // wait for kernel to finish
// end time
gettimeofday(&end, NULL);
CheckCuda(__LINE__);
const double runtime = end.tv_sec - beg.tv_sec + (end.tv_usec - beg.tv_usec) / 1000000.0;
printf("compute time: %.6f s\n", runtime);
// Get result from GPU
hipMemcpy( pic, d_pic, sizeof(unsigned char) * frames * width * width, hipMemcpyDeviceToHost);
CheckCuda(__LINE__);
// write result to BMP files
if (width <= 256) {
for (int frame = 0; frame < frames; frame++) {
BMP24 bmp(0, 0, width, width);
for (int y = 0; y < width; y++) {
for (int x = 0; x < width; x++) {
bmp.dot(x, y, pic[frame * width * width + y * width + x] * 0x000001 + 0x50ff00 - pic[frame * width * width + y * width + x] * 0x000100);
}
}
char name [32];
sprintf(name, "fractal%d.bmp", frame + 1000);
bmp.save(name);
}
}
// clean up
hipFree(d_pic);
delete [] pic;
return 0;
}
| 6a04110c9f0827106ec113c9b20ab43bfd0d250f.cu | /*
Fractal code
Copyright 2022 Martin Burtscher
Redistribution in source or binary form, with or without modification, is not
permitted. Use in source or binary form, with or without modification, is only
permitted for academic use in CS 4380 and CS 5351 at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Worked by:
- Chuong Dinh Vu
- Noah del Angel
*/
#include <cstdio>
#include <cuda.h>
#include <cmath>
#include <algorithm>
#include <sys/time.h>
#include "BMP24.h"
static const int ThreadsPerBlock = 512;
static __global__ void fractal(const int frames, const int width, unsigned char* const pic)
{
// Determine the global index this thread handles
const int i = threadIdx.x + blockIdx.x * blockDim.x;
// Calculate the current frame and row
const int col = i % width;
const int row = ( i / width ) % width;
const int frame = i / ( width * width );
const double Delta = 0.004937716;
const double xMid = -0.664689302;
const double yMid = 0.355561972;
// compute pixels of each frame
if( i < frames * ( width * width )){ // frames
const double delta = Delta * (1.5 + cos(2.0 * M_PI * frame / frames));
const double xMin = xMid - delta;
const double yMin = yMid - delta;
const double dw = 2.0 * delta / width;
const double cy = yMin + row * dw;
const double cx = xMin + col * dw;
double x = cx;
double y = cy;
double x2, y2;
int count = 256;
do {
x2 = x * x;
y2 = y * y;
y = 2.0 * x * y + cy;
x = x2 - y2 + cx;
count--;
} while ((count > 0) && ((x2 + y2) < 4.0));
pic[frame * width * width + row * width + col] = (unsigned char)count;
}
}
static void CheckCuda(const int line)
{
cudaError_t e;
cudaDeviceSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "CUDA error %d on line %d; %s\n", e, line, cudaGetErrorString(e));
exit(-1);
}
}
int main(int argc, char* argv [])
{
printf("Fractal v2.3\n");
// check command line
if (argc != 3) {fprintf(stderr, "USAGE: %s frame_width number_of_frames\n", argv[0]); exit(-1);}
const int width = atoi(argv[1]);
if (width < 10) {fprintf(stderr, "ERROR: frame_width must be at least 10\n"); exit(-1);}
const int frames = atoi(argv[2]);
if (frames < 1) {fprintf(stderr, "ERROR: number_of_frames must be at least 1\n"); exit(-1);}
printf("frames: %d\n", frames);
printf("width: %d\n", width);
// allocate picture array
unsigned char* pic = new unsigned char [frames * width * width];
// Allocate picture array on GPU
unsigned char* d_pic;
cudaMalloc((void **) &d_pic, sizeof(unsigned char) * frames * width * width );
CheckCuda(__LINE__);
// start time
timeval beg, end;
gettimeofday(&beg, NULL);
// execute timed code
fractal<<<( (frames * width * width) + ThreadsPerBlock - 1 ) / ThreadsPerBlock , ThreadsPerBlock>>>(frames, width, d_pic);
cudaDeviceSynchronize(); // wait for kernel to finish
// end time
gettimeofday(&end, NULL);
CheckCuda(__LINE__);
const double runtime = end.tv_sec - beg.tv_sec + (end.tv_usec - beg.tv_usec) / 1000000.0;
printf("compute time: %.6f s\n", runtime);
// Get result from GPU
cudaMemcpy( pic, d_pic, sizeof(unsigned char) * frames * width * width, cudaMemcpyDeviceToHost);
CheckCuda(__LINE__);
// write result to BMP files
if (width <= 256) {
for (int frame = 0; frame < frames; frame++) {
BMP24 bmp(0, 0, width, width);
for (int y = 0; y < width; y++) {
for (int x = 0; x < width; x++) {
bmp.dot(x, y, pic[frame * width * width + y * width + x] * 0x000001 + 0x50ff00 - pic[frame * width * width + y * width + x] * 0x000100);
}
}
char name [32];
sprintf(name, "fractal%d.bmp", frame + 1000);
bmp.save(name);
}
}
// clean up
cudaFree(d_pic);
delete [] pic;
return 0;
}
|
c1639c432478e1012599f24d8021a156f03c9d18.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include "spamfilter_utils_hip.cuh"
#include "utils/spamfilter_timer.h"
// adds two device vectors with CuBLAS and stores the results in the first one
void p_add_vectors(hipblasHandle_t handle, float* a, float* b, const size_t size, const float scale_for_a){
hipblasSaxpy(handle, size, &scale_for_a, b, 1, a, 1);
}
// void addVectors(float* a, float* b, const size_t size, const float scale_for_b) {
// for (size_t j = 0; j < size; j++) {
// a[j] += scale_for_b * b[j];
// }
// }
// computes dot product with CuBLAS for two given vectors a and b
float p_dotProduct(hipblasHandle_t handle, float* a, float* b, float* d_a, float* d_b, const size_t size) {
float result[1];
hipblasSdot (handle, size, d_a, 1, d_b, 1, result);
hipDeviceSynchronize();
return *result;
}
// float dotProduct(float* d_a, float* d_b, const size_t num_elems) {
// float result = 0;
// for (size_t j = 0; j < num_elems; j++) {
// result += d_a[j] * d_b[j];
// }
// return result;
// }
// Parallel implementation of matrix-vector multiplication. Each thread handles
// a subset of the features, striding by the number of threads in the
// whole mini batch.
__device__ void d_matrixVectorMultiply(
FeatureType* matrix,
FeatureType* vect,
float scalar,
size_t batch_size,
size_t num_features,
size_t threads_per_mini_batch,
FeatureType* result) {
size_t tidx = threadIdx.x;
size_t bidx = blockIdx.x;
for (int j = 0; j < batch_size; j++) {
for (int i = tidx; i < num_features; i += threads_per_mini_batch) {
// index of the point with respect to the whole dataset
size_t point_idx = bidx * batch_size + j;
// index of the feature with respect to all features in the dataset
size_t feature_idx = point_idx * num_features + i;
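// Illustrative values (assuming batch_size == 2 and num_features == 4):
// bidx == 1 and j == 0 give point_idx == 2 and feature_idx == 8 + i.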
result[i] += matrix[feature_idx] * vect[j] * scalar;
}
}
}
// updates the parameters using atomics
__device__ void d_updateParameters(
FeatureType* gradient,
FeatureType* parameter_vector,
size_t num_features,
size_t threads_per_mini_batch,
double step_size) {
size_t tidx = threadIdx.x;
for (size_t i = tidx; i < num_features; i += threads_per_mini_batch) {
FeatureType gradient_times_step_size = gradient[i] * step_size;
atomicAdd(¶meter_vector[i], -gradient_times_step_size);
}
}
// initializes all values in array to a certain value
__device__ void d_memset(
FeatureType* array,
float value,
size_t num_elements,
size_t threads_per_mini_batch) {
size_t tidx = threadIdx.x;
for (size_t i = tidx; i < num_elements; i += threads_per_mini_batch) {
array[i] = value;
}
}
// computes logistic function for a given parameter vector (theta) and a data point (x_i)
double p_logisticFunction(hipblasHandle_t handle, FeatureType* d_theta, FeatureType* d_x_i, const size_t num_features) {
return logisticFunction(p_dotProduct(handle, d_theta, d_x_i, num_features));
}
// double logisticFunction(FeatureType* d_theta, FeatureType* d_x_i, const size_t num_features) {
// return d_logisticFunction(dotProduct(d_theta, d_x_i, num_features));
//}
// computes logistic function with fast exp
__device__ float d_logisticFunction(float exponent) {
return (1.0f / (1.0f + __expf(-exponent)));
}
// verify the device properties satisfy the assumptions of the kernel
// check that the resulting grid and block dimensions
// don't violate device limits
bool checkDeviceProps(
size_t shared_memory_size,
dim3 block_size,
dim3 grid_size) {
bool devicePropsOK = true;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if (shared_memory_size > deviceProp.sharedMemPerBlock) {
// printf("Shared Memory size exceeds maximum allowed size.\n");
printf("SM-");
devicePropsOK = false;
}
if (block_size.x > deviceProp.maxThreadsDim[0]
|| grid_size.x > deviceProp.maxGridSize[0]) {
// printf("Grid or block size exceeds maximum allowed size.\n");
printf("B-");
devicePropsOK = false;
}
return devicePropsOK;
}
// updates the parameters (theta)
void p_updateParameters(FeatureType* d_theta, FeatureType* d_gradient, size_t num_features, float step_size, bool revert) {
float sign = revert ? 1 : -1;
step_size *= sign;
addVectors(d_theta, d_gradient, num_features, step_size);
hipblasSaxpy(handle, num_features, &step_size, d_gradient, 1, d_theta, 1);
}
| c1639c432478e1012599f24d8021a156f03c9d18.cu | #include <math.h>
#include "spamfilter_utils_cuda.cuh"
#include "utils/spamfilter_timer.h"
// adds two device vectors with CuBLAS and stores the results in the first one
void p_add_vectors(cublasHandle_t handle, float* a, float* b, const size_t size, const float scale_for_a){
cublasSaxpy(handle, size, &scale_for_a, b, 1, a, 1);
}
// void addVectors(float* a, float* b, const size_t size, const float scale_for_b) {
// for (size_t j = 0; j < size; j++) {
// a[j] += scale_for_b * b[j];
// }
// }
// computes dot product with CuBLAS for two given vectors a and b
float p_dotProduct(cublasHandle_t handle, float* a, float* b, float* d_a, float* d_b, const size_t size) {
float result[1];
cublasSdot (handle, size, d_a, 1, d_b, 1, result);
cudaDeviceSynchronize();
return *result;
}
// float dotProduct(float* d_a, float* d_b, const size_t num_elems) {
// float result = 0;
// for (size_t j = 0; j < num_elems; j++) {
// result += d_a[j] * d_b[j];
// }
// return result;
// }
// Parallel implementation of matrix-vector multiplication. Each thread handles
// a subset of the features, striding by the number of threads in the
// whole mini batch.
__device__ void d_matrixVectorMultiply(
FeatureType* matrix,
FeatureType* vect,
float scalar,
size_t batch_size,
size_t num_features,
size_t threads_per_mini_batch,
FeatureType* result) {
size_t tidx = threadIdx.x;
size_t bidx = blockIdx.x;
for (int j = 0; j < batch_size; j++) {
for (int i = tidx; i < num_features; i += threads_per_mini_batch) {
// index of the point with respect to the whole dataset
size_t point_idx = bidx * batch_size + j;
// index of the feature with respect to all features in the dataset
size_t feature_idx = point_idx * num_features + i;
result[i] += matrix[feature_idx] * vect[j] * scalar;
}
}
}
// updates the parameters using atomics
__device__ void d_updateParameters(
FeatureType* gradient,
FeatureType* parameter_vector,
size_t num_features,
size_t threads_per_mini_batch,
double step_size) {
size_t tidx = threadIdx.x;
for (size_t i = tidx; i < num_features; i += threads_per_mini_batch) {
FeatureType gradient_times_step_size = gradient[i] * step_size;
atomicAdd(¶meter_vector[i], -gradient_times_step_size);
}
}
// initializes all values in array to a certain value
__device__ void d_memset(
FeatureType* array,
float value,
size_t num_elements,
size_t threads_per_mini_batch) {
size_t tidx = threadIdx.x;
for (size_t i = tidx; i < num_elements; i += threads_per_mini_batch) {
array[i] = value;
}
}
// computes logistic function for a given parameter vector (theta) and a data point (x_i)
double p_logisticFunction(cublasHandle_t handle, FeatureType* d_theta, FeatureType* d_x_i, const size_t num_features) {
return logisticFunction(p_dotProduct(handle, d_theta, d_x_i, num_features));
}
// double logisticFunction(FeatureType* d_theta, FeatureType* d_x_i, const size_t num_features) {
// return d_logisticFunction(dotProduct(d_theta, d_x_i, num_features));
//}
// computes logistic function with fast exp
__device__ float d_logisticFunction(float exponent) {
return (1.0f / (1.0f + __expf(-exponent)));
}
// verify the device properties satisfy the assumptions of the kernel
// check that the resulting grid and block dimensions
// don't violate device limits
bool checkDeviceProps(
size_t shared_memory_size,
dim3 block_size,
dim3 grid_size) {
bool devicePropsOK = true;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if (shared_memory_size > deviceProp.sharedMemPerBlock) {
// printf("Shared Memory size exceeds maximum allowed size.\n");
printf("SM-");
devicePropsOK = false;
}
if (block_size.x > deviceProp.maxThreadsDim[0]
|| grid_size.x > deviceProp.maxGridSize[0]) {
// printf("Grid or block size exceeds maximum allowed size.\n");
printf("B-");
devicePropsOK = false;
}
return devicePropsOK;
}
// updates the parameters (theta)
void p_updateParameters(FeatureType* d_theta, FeatureType* d_gradient, size_t num_features, float step_size, bool revert) {
float sign = revert ? 1 : -1;
step_size *= sign;
addVectors(d_theta, d_gradient, num_features, step_size);
cublasSaxpy(handle, num_features, &step_size, d_gradient, 1, d_theta, 1);
}
|
7352e1c1ac60aa2d8e6147be3b03c3c498b479f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
//defaults tuned for K40
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 4
#endif
#ifndef tile_size_x
#define tile_size_x 4
#endif
#ifndef tile_size_y
#define tile_size_y 2
#endif
#ifndef use_shared_mem
#define use_shared_mem 1
#endif
template <int tile_size, int sh_stride, int d_stride, typename T>
__device__ __forceinline__ void fill_shared_mem_tiled_1D(T* sh_mem, const T *d_mem, int sh_offset, int d_offset, int N) {
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
if (d_offset+ti*d_stride < N) {
sh_mem[sh_offset+ti*sh_stride] = d_mem[d_offset+ti*d_stride];
}
}
}
/*
* This function performs the main body of work for computing the Bhattacharyya
* cost function for two given point sets.
* The parallelization is such that a 2D array of 2D thread blocks is created
* to match the m*n iteration space. The amount of work per thread is controlled
* through tiling factors tile_size_x and tile_size_y.
* The cross term is reduced to a single value per thread block, which then needs
* to be reduced to a single value in a second kernel.
*/
template<typename T, int dim>
__device__ __forceinline__ void ExpDist_tiled(const T *A, const T *B,
int m, int n, const T *scale_A, const T *scale_B, T *d_cross_term) {
// Specialize BlockReduce for a 1D block of block_size_x threads on type T
typedef hipcub::BlockReduce<T, block_size_x, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduce;
// Allocate shared memory for BlockReduce
__shared__ typename BlockReduce::TempStorage temp_storage;
T cross_term = 0.0;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx + blockIdx.x * block_size_x * tile_size_x;
int j = ty + blockIdx.y * block_size_y * tile_size_y;
#if use_shared_mem == 1
__shared__ T sh_A[dim][block_size_x*tile_size_x];
__shared__ T sh_B[dim][block_size_y*tile_size_y];
__shared__ T sh_scale_A[block_size_x*tile_size_x];
__shared__ T sh_scale_B[block_size_y*tile_size_y];
#pragma unroll
for (int d=0; d<dim; d++) {
fill_shared_mem_tiled_1D<tile_size_x, block_size_x, dim*block_size_x>(sh_A[d], A, tx, d + dim * i, dim * m);
fill_shared_mem_tiled_1D<tile_size_y, block_size_y, dim*block_size_y>(sh_B[d], B, ty, d + dim * j, dim * n);
}
fill_shared_mem_tiled_1D<tile_size_x, block_size_x, block_size_x>(sh_scale_A, scale_A, tx, i, m);
fill_shared_mem_tiled_1D<tile_size_y, block_size_y, block_size_y>(sh_scale_B, scale_B, ty, j, n);
__syncthreads();
#endif
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
#pragma unroll
for (int tj=0; tj<tile_size_y; tj++) {
if ((i+ti*block_size_x < m) && (j+tj*block_size_y < n)) {
T dist_ij = 0;
#if use_shared_mem == 0
#pragma unroll
for (int d=0; d<dim; d++) {
int id = dim * (i+ti*block_size_x) + d;
int jd = dim * (j+tj*block_size_y) + d;
dist_ij += (A[id]-B[jd])*(A[id]-B[jd]);
}
cross_term += exp(-dist_ij/(scale_A[i+ti*block_size_x] + scale_B[j+tj*block_size_y]));
#elif use_shared_mem == 1
#pragma unroll
for (int d=0; d<dim; d++) {
dist_ij += (sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y])*(sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y]);
}
cross_term += exp(-dist_ij/(sh_scale_A[tx+ti*block_size_x] + sh_scale_B[ty+tj*block_size_y]));
#endif
}
}
}
//reduce cross_term within the block
cross_term = BlockReduce(temp_storage).Sum(cross_term);
//write back the per-thread block partial cross term
if (tx == 0 && ty == 0) {
d_cross_term[blockIdx.y*gridDim.x+blockIdx.x] = cross_term;
}
}
extern "C"
__global__ void
ExpDist(const double *A, const double *B,
int m, int n, const double *scale_A, const double *scale_B, double *cross_term) {
//2-dimensional with double precision
ExpDist_tiled<double, 2>(A, B, m, n, scale_A, scale_B, cross_term);
}
template<typename T, int dim>
__device__ __forceinline__ T compute_expdist_block_shared(int tx, int ty, int i, int j,
T (&sh_A)[dim][block_size_x*tile_size_x],
T (&sh_B)[dim][block_size_y*tile_size_y],
T (&sh_scale_A)[block_size_x*tile_size_x],
T (&sh_scale_B)[block_size_y*tile_size_y], int m, int n) {
T cross_term = 0;
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
#pragma unroll
for (int tj=0; tj<tile_size_y; tj++) {
if ((i+ti*block_size_x < m) && (j+tj*block_size_y < n)) {
T dist_ij = 0;
#pragma unroll
for (int d=0; d<dim; d++) {
dist_ij += (sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y])*(sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y]);
}
cross_term += exp(-dist_ij/(sh_scale_A[tx+ti*block_size_x] + sh_scale_B[ty+tj*block_size_y]));
}
}
}
return cross_term;
}
template<typename T, int dim>
__device__ __forceinline__ T compute_expdist_block(int i, int j, const T *A, const T *B,
const T *scale_A, const T* scale_B, int m, int n) {
T cross_term = 0;
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
#pragma unroll
for (int tj=0; tj<tile_size_y; tj++) {
if ((i+ti*block_size_x < m) && (j+tj*block_size_y < n)) {
T dist_ij = 0;
#pragma unroll
for (int d=0; d<dim; d++) {
int id = dim*(i+ti*block_size_x) + d;
int jd = dim*(j+tj*block_size_y) + d;
dist_ij += (A[id]-B[jd])*(A[id]-B[jd]);
}
cross_term += exp(-dist_ij/(scale_A[i+ti*block_size_x] + scale_B[j+tj*block_size_y]));
}
}
}
return cross_term;
}
/*
* This function performs the main body of work for computing the Bhattacharyya
* cost function for two given point sets.
* The parallelization is such that a 1D array of 2D thread blocks is created over
* the m-dimension. The thread blocks then iterate over n, to process the entire
* m*n iteration space. The amount of work per thread is controlled
* through tiling factors tile_size_x and tile_size_y.
* The cross term is reduced to a single value per thread block, which then needs
* to be reduced to a single value in a second kernel.
*/
template<typename T, int dim>
__device__ __forceinline__ void ExpDist_tiled_column(const T *A, const T *B,
int m, int n, const T *scale_A, const T *scale_B, T *d_cross_term) {
// Specialize BlockReduce for a 1D block of block_size_x threads on type T
typedef hipcub::BlockReduce<T, block_size_x, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduce;
// Allocate shared memory for BlockReduce
__shared__ typename BlockReduce::TempStorage temp_storage;
T cross_term = 0.0;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx + blockIdx.x * block_size_x * tile_size_x;
int j = ty + blockIdx.y * block_size_y * tile_size_y;
#if use_shared_mem == 1
__shared__ T sh_A[dim][block_size_x*tile_size_x];
__shared__ T sh_B[dim][block_size_y*tile_size_y];
__shared__ T sh_scale_A[block_size_x*tile_size_x];
__shared__ T sh_scale_B[block_size_y*tile_size_y];
#pragma unroll
for (int d=0; d<dim; d++) {
fill_shared_mem_tiled_1D<tile_size_x, dim*block_size_x, dim*block_size_x>(sh_A[d], A+d, tx, i, m);
}
fill_shared_mem_tiled_1D<tile_size_x, block_size_x, block_size_x>(sh_scale_A, scale_A, tx, i, m);
__syncthreads();
#endif
int step_size = gridDim.y * block_size_y * tile_size_y;
for (int sj = j; sj < n; sj += step_size) {
#if use_shared_mem == 1
for (int d=0; d<dim; d++) {
fill_shared_mem_tiled_1D<tile_size_y, dim*block_size_y, dim*block_size_y>(sh_B[d], B+d, ty, sj, n);
}
fill_shared_mem_tiled_1D<tile_size_y, block_size_y, block_size_y>(sh_scale_B, scale_B, ty, sj, n);
__syncthreads();
#endif
#if use_shared_mem == 0
cross_term += compute_expdist_block<double, 2>(i, sj, A, B, scale_A, scale_B, m, n);
#elif use_shared_mem == 1
cross_term += compute_expdist_block_shared<double, 2>(tx, ty, i, sj, sh_A, sh_B, sh_scale_A, sh_scale_B, m, n);
__syncthreads();
#endif
}
//reduce cross_term within the block
cross_term = BlockReduce(temp_storage).Sum(cross_term);
//write back the per-thread block partial cross term
if (tx == 0 && ty == 0) {
d_cross_term[blockIdx.y*gridDim.x+blockIdx.x] = cross_term;
}
}
extern "C"
__global__ void
ExpDist_column(const double *A, const double *B,
int m, int n, const double *scale_A, const double *scale_B, double *cross_term) {
//2-dimensional with double precision
ExpDist_tiled_column<double, 2>(A, B, m, n, scale_A, scale_B, cross_term);
}
#ifdef reduce_block_size
#define block_size reduce_block_size
#else
#define block_size block_size_x
#endif
/*
* Reduce the per-thread-block cross terms computed by the kernels above to a single value
*
* This kernel is designed to run as a single thread block, because the number of terms to reduce is
* of size n or m, which is expected to be around 2000 or so. The number of items to reduce
* is passed as the last argument 'nblocks', which corresponds to the number of thread blocks used
* by the first kernel.
*/
extern "C"
__global__ void reduce_cross_term(double *output, double *d_cross_term, int m, int n, int nblocks) {
int tx = threadIdx.x;
// Specialize BlockReduce for a 1D block of block_size threads on type double
typedef hipcub::BlockReduce<double, block_size> BlockReduce;
// Allocate shared memory for BlockReduce
__shared__ typename BlockReduce::TempStorage temp_storage;
double cross_term = 0.0;
for (int i=tx; i<nblocks; i+=block_size) {
cross_term += d_cross_term[i];
}
//reduce to single value within thread block
cross_term = BlockReduce(temp_storage).Sum(cross_term);
//thread 0 writes output
if (tx == 0) {
output[0] = cross_term;
}
}
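// A minimal host-side launch sketch; the original host code is not part of
// this file, so the helper name launch_expdist_sketch and the configuration
// below are assumptions for illustration. It assumes one thread block per
// (block_size_x*tile_size_x) x (block_size_y*tile_size_y) tile of the m x n
// iteration space, one partial cross term per block (d_partial must hold at
// least grid.x * grid.y doubles), summed by the single-block reduction kernel.
static void launch_expdist_sketch(const double *d_A, const double *d_B,
  int m, int n, const double *d_scale_A, const double *d_scale_B,
  double *d_partial, double *d_result)
{
  dim3 threads(block_size_x, block_size_y, 1);
  dim3 grid((m + block_size_x * tile_size_x - 1) / (block_size_x * tile_size_x),
            (n + block_size_y * tile_size_y - 1) / (block_size_y * tile_size_y), 1);
  int nblocks = grid.x * grid.y;
  hipLaunchKernelGGL(ExpDist, grid, threads, 0, 0,
                     d_A, d_B, m, n, d_scale_A, d_scale_B, d_partial);
  hipLaunchKernelGGL(reduce_cross_term, dim3(1), dim3(block_size), 0, 0,
                     d_result, d_partial, m, n, nblocks);
}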
| 7352e1c1ac60aa2d8e6147be3b03c3c498b479f5.cu | #include <cub/cub.cuh>
//defaults tuned for K40
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 4
#endif
#ifndef tile_size_x
#define tile_size_x 4
#endif
#ifndef tile_size_y
#define tile_size_y 2
#endif
#ifndef use_shared_mem
#define use_shared_mem 1
#endif
template <int tile_size, int sh_stride, int d_stride, typename T>
__device__ __forceinline__ void fill_shared_mem_tiled_1D(T* sh_mem, const T *d_mem, int sh_offset, int d_offset, int N) {
#pragma unroll
for (int ti=0; ti<tile_size; ti++) {
if (d_offset+ti*d_stride < N) {
sh_mem[sh_offset+ti*sh_stride] = d_mem[d_offset+ti*d_stride];
}
}
}
/*
* This function performs the main body of work for computing the Bhattacharyya
* cost function for two given point sets.
* The parallelization is such that a 2D array of 2D thread blocks is created
* to match the m*n iteration space. The amount of work per thread is controlled
* through tiling factors tile_size_x and tile_size_y.
* The cross term is reduced to a single value per thread block, which then needs
* to be reduced to a single value in a second kernel.
*/
template<typename T, int dim>
__device__ __forceinline__ void ExpDist_tiled(const T *A, const T *B,
int m, int n, const T *scale_A, const T *scale_B, T *d_cross_term) {
// Specialize BlockReduce for a 1D block of block_size_x threads on type T
typedef cub::BlockReduce<T, block_size_x, cub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduce;
// Allocate shared memory for BlockReduce
__shared__ typename BlockReduce::TempStorage temp_storage;
T cross_term = 0.0;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx + blockIdx.x * block_size_x * tile_size_x;
int j = ty + blockIdx.y * block_size_y * tile_size_y;
#if use_shared_mem == 1
__shared__ T sh_A[dim][block_size_x*tile_size_x];
__shared__ T sh_B[dim][block_size_y*tile_size_y];
__shared__ T sh_scale_A[block_size_x*tile_size_x];
__shared__ T sh_scale_B[block_size_y*tile_size_y];
#pragma unroll
for (int d=0; d<dim; d++) {
fill_shared_mem_tiled_1D<tile_size_x, block_size_x, dim*block_size_x>(sh_A[d], A, tx, d + dim * i, dim * m);
fill_shared_mem_tiled_1D<tile_size_y, block_size_y, dim*block_size_y>(sh_B[d], B, ty, d + dim * j, dim * n);
}
fill_shared_mem_tiled_1D<tile_size_x, block_size_x, block_size_x>(sh_scale_A, scale_A, tx, i, m);
fill_shared_mem_tiled_1D<tile_size_y, block_size_y, block_size_y>(sh_scale_B, scale_B, ty, j, n);
__syncthreads();
#endif
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
#pragma unroll
for (int tj=0; tj<tile_size_y; tj++) {
if ((i+ti*block_size_x < m) && (j+tj*block_size_y < n)) {
T dist_ij = 0;
#if use_shared_mem == 0
#pragma unroll
for (int d=0; d<dim; d++) {
int id = dim * (i+ti*block_size_x) + d;
int jd = dim * (j+tj*block_size_y) + d;
dist_ij += (A[id]-B[jd])*(A[id]-B[jd]);
}
cross_term += exp(-dist_ij/(scale_A[i+ti*block_size_x] + scale_B[j+tj*block_size_y]));
#elif use_shared_mem == 1
#pragma unroll
for (int d=0; d<dim; d++) {
dist_ij += (sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y])*(sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y]);
}
cross_term += exp(-dist_ij/(sh_scale_A[tx+ti*block_size_x] + sh_scale_B[ty+tj*block_size_y]));
#endif
}
}
}
//reduce cross_term within the block
cross_term = BlockReduce(temp_storage).Sum(cross_term);
//write back the per-thread block partial cross term
if (tx == 0 && ty == 0) {
d_cross_term[blockIdx.y*gridDim.x+blockIdx.x] = cross_term;
}
}
extern "C"
__global__ void
ExpDist(const double *A, const double *B,
int m, int n, const double *scale_A, const double *scale_B, double *cross_term) {
//2-dimensional with double precision
ExpDist_tiled<double, 2>(A, B, m, n, scale_A, scale_B, cross_term);
}
template<typename T, int dim>
__device__ __forceinline__ T compute_expdist_block_shared(int tx, int ty, int i, int j,
T (&sh_A)[dim][block_size_x*tile_size_x],
T (&sh_B)[dim][block_size_y*tile_size_y],
T (&sh_scale_A)[block_size_x*tile_size_x],
T (&sh_scale_B)[block_size_y*tile_size_y], int m, int n) {
T cross_term = 0;
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
#pragma unroll
for (int tj=0; tj<tile_size_y; tj++) {
if ((i+ti*block_size_x < m) && (j+tj*block_size_y < n)) {
T dist_ij = 0;
#pragma unroll
for (int d=0; d<dim; d++) {
dist_ij += (sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y])*(sh_A[d][tx+ti*block_size_x]-sh_B[d][ty+tj*block_size_y]);
}
cross_term += exp(-dist_ij/(sh_scale_A[tx+ti*block_size_x] + sh_scale_B[ty+tj*block_size_y]));
}
}
}
return cross_term;
}
template<typename T, int dim>
__device__ __forceinline__ T compute_expdist_block(int i, int j, const T *A, const T *B,
const T *scale_A, const T* scale_B, int m, int n) {
T cross_term = 0;
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
#pragma unroll
for (int tj=0; tj<tile_size_y; tj++) {
if ((i+ti*block_size_x < m) && (j+tj*block_size_y < n)) {
T dist_ij = 0;
#pragma unroll
for (int d=0; d<dim; d++) {
int id = dim*(i+ti*block_size_x) + d;
int jd = dim*(j+tj*block_size_y) + d;
dist_ij += (A[id]-B[jd])*(A[id]-B[jd]);
}
cross_term += exp(-dist_ij/(scale_A[i+ti*block_size_x] + scale_B[j+tj*block_size_y]));
}
}
}
return cross_term;
}
/*
* This function performs the main body of work for computing the Bhattacharya
* cost function for two given point sets.
* The parallelization is such that a 1D array of 2D thread blocks is created over
* the m-dimension. The thread blocks then iterate over n, to process the entire
* m*n iteration space. The amount of work per thread is controlled
* through tiling factors tile_size_x and tile_size_y.
* The cross term is reduced to a single value per thread block, which then needs
* to be reduced to a single value in a second kernel.
*/
template<typename T, int dim>
__device__ __forceinline__ void ExpDist_tiled_column(const T *A, const T *B,
int m, int n, const T *scale_A, const T *scale_B, T *d_cross_term) {
// Specialize BlockReduce for a 1D block of block_size_x threads on type T
typedef cub::BlockReduce<T, block_size_x, cub::BLOCK_REDUCE_WARP_REDUCTIONS, block_size_y> BlockReduce;
// Allocate shared memory for BlockReduce
__shared__ typename BlockReduce::TempStorage temp_storage;
T cross_term = 0.0;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx + blockIdx.x * block_size_x * tile_size_x;
int j = ty + blockIdx.y * block_size_y * tile_size_y;
#if use_shared_mem == 1
__shared__ T sh_A[dim][block_size_x*tile_size_x];
__shared__ T sh_B[dim][block_size_y*tile_size_y];
__shared__ T sh_scale_A[block_size_x*tile_size_x];
__shared__ T sh_scale_B[block_size_y*tile_size_y];
#pragma unroll
for (int d=0; d<dim; d++) {
fill_shared_mem_tiled_1D<tile_size_x, dim*block_size_x, dim*block_size_x>(sh_A[d], A+d, tx, i, m);
}
fill_shared_mem_tiled_1D<tile_size_x, block_size_x, block_size_x>(sh_scale_A, scale_A, tx, i, m);
__syncthreads();
#endif
int step_size = gridDim.y * block_size_y * tile_size_y;
for (int sj = j; sj < n; sj += step_size) {
#if use_shared_mem == 1
for (int d=0; d<dim; d++) {
fill_shared_mem_tiled_1D<tile_size_y, dim*block_size_y, dim*block_size_y>(sh_B[d], B+d, ty, sj, n);
}
fill_shared_mem_tiled_1D<tile_size_y, block_size_y, block_size_y>(sh_scale_B, scale_B, ty, sj, n);
__syncthreads();
#endif
#if use_shared_mem == 0
cross_term += compute_expdist_block<double, 2>(i, sj, A, B, scale_A, scale_B, m, n);
#elif use_shared_mem == 1
cross_term += compute_expdist_block_shared<double, 2>(tx, ty, i, sj, sh_A, sh_B, sh_scale_A, sh_scale_B, m, n);
__syncthreads();
#endif
}
//reduce cross_term within the block
cross_term = BlockReduce(temp_storage).Sum(cross_term);
//write back the per-thread block partial cross term
if (tx == 0 && ty == 0) {
d_cross_term[blockIdx.y*gridDim.x+blockIdx.x] = cross_term;
}
}
extern "C"
__global__ void
ExpDist_column(const double *A, const double *B,
int m, int n, const double *scale_A, const double *scale_B, double *cross_term) {
//2-dimensional with double precision
ExpDist_tiled_column<double, 2>(A, B, m, n, scale_A, scale_B, cross_term);
}
#ifdef reduce_block_size
#define block_size reduce_block_size
#else
#define block_size block_size_x
#endif
/*
* Reduce the per thread block cross terms computed in the GaussTransform kernel to single value
*
* This kernel is designed to run as single-thread block, because the number of terms to reduce is
* of size n or m, which is expected to be around 2000 or so. The number of items to reduce
* is passed as the last argument 'nblocks', which corresponds to the number of thread blocks used
* by the first kernel.
*/
extern "C"
__global__ void reduce_cross_term(double *output, double *d_cross_term, int m, int n, int nblocks) {
int tx = threadIdx.x;
// Specialize BlockReduce for a 1D block of block_size threads on type double
typedef cub::BlockReduce<double, block_size> BlockReduce;
// Allocate shared memory for BlockReduce
__shared__ typename BlockReduce::TempStorage temp_storage;
double cross_term = 0.0;
for (int i=tx; i<nblocks; i+=block_size) {
cross_term += d_cross_term[i];
}
//reduce to single value within thread block
cross_term = BlockReduce(temp_storage).Sum(cross_term);
//thread 0 writes output
if (tx == 0) {
output[0] = cross_term;
}
}
|
a545962df19e46e73fa03df95f36c8c2c653618d.hip | // !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
#include <hip/hip_runtime.h>
//#include "hip/hip_runtime.h"
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
#define N 16
__device__ int index(int col, int row, int ord){
return (row *ord)+col;
}
__global__ void Transpose(int *c, const int *a){
int col = (blockDim.x * blockIdx.x) + threadIdx.x;
int row = (blockDim.y * blockIdx.y) + threadIdx.y;
c[index(row,col,4)] = a[index(col, row, 4)] ;
}
int main()
{
const int arraySize = 16;
const int a[arraySize] = { 1, 2, 3, 4, 5 ,6,7,8,9,10,11,12,13,14,15,16};
int c[arraySize] = { 0 };
int *dev_a = 0;
int *dev_c = 0;
// Allocate GPU buffers for three vectors (one input, one output) .
hipMalloc((void**)&dev_c, arraySize * sizeof(int));
hipMalloc((void**)&dev_a, arraySize * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_a, a, arraySize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, c, arraySize * sizeof(int), hipMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
dim3 dimgrid(2, 2);
dim3 dimblock(2, 2);
//Transpose<<<dimgrid, dimblock>>>(dev_c, dev_a);
ESBMC_verify_kernel(Transpose,dimgrid,dimblock,dev_c,dev_a);
// Copy output vector from GPU buffer to host memory.
hipMemcpy(c, dev_c, arraySize * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
for (int i = 0; i < arraySize; i++){
printf("%d ",c[i]);
if(i<3)
assert(c[i+1]==c[i]+4);
}
return 0;
}
| a545962df19e46e73fa03df95f36c8c2c653618d.cu | #include <call_kernel.h>
#include <cuda.h>
//#include "cuda_runtime.h"
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
#define N 16
__device__ int index(int col, int row, int ord){
return (row *ord)+col;
}
__global__ void Transpose(int *c, const int *a){
int col = (blockDim.x * blockIdx.x) + threadIdx.x;
int row = (blockDim.y * blockIdx.y) + threadIdx.y;
c[index(row,col,4)] = a[index(col, row, 4)] ;
}
int main()
{
const int arraySize = 16;
const int a[arraySize] = { 1, 2, 3, 4, 5 ,6,7,8,9,10,11,12,13,14,15,16};
int c[arraySize] = { 0 };
int *dev_a = 0;
int *dev_c = 0;
// Allocate GPU buffers for three vectors (one input, one output) .
cudaMalloc((void**)&dev_c, arraySize * sizeof(int));
cudaMalloc((void**)&dev_a, arraySize * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, arraySize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, c, arraySize * sizeof(int), cudaMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
dim3 dimgrid(2, 2);
dim3 dimblock(2, 2);
//Transpose<<<dimgrid, dimblock>>>(dev_c, dev_a);
ESBMC_verify_kernel(Transpose,dimgrid,dimblock,dev_c,dev_a);
// Copy output vector from GPU buffer to host memory.
cudaMemcpy(c, dev_c, arraySize * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
for (int i = 0; i < arraySize; i++){
printf("%d ",c[i]);
if(i<3)
assert(c[i+1]==c[i]+4);
}
return 0;
}
|
8a7fb31ff3a86d591d15ecd22849f9b2cd8277cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "../headers/utils.h"
#include <iostream>
namespace utils{
int MaxNumberOfThreads( cudakmc::verbosity v, int devNumber){
hipDeviceProp_t properties;
CHECK_ERROR(hipGetDeviceProperties(&properties,devNumber));
int nOfDev = properties.maxThreadsPerMultiProcessor * properties.multiProcessorCount;
if(v == cudakmc::verbose)
std::cout<<"Maximum no. threads allowed:"<<nOfDev<<" for "<<properties.name<<std::endl;
return nOfDev;
}
}
| 8a7fb31ff3a86d591d15ecd22849f9b2cd8277cf.cu | #include "../headers/utils.h"
#include <iostream>
namespace utils{
int MaxNumberOfThreads( cudakmc::verbosity v, int devNumber){
cudaDeviceProp properties;
CHECK_ERROR(cudaGetDeviceProperties(&properties,devNumber));
int nOfDev = properties.maxThreadsPerMultiProcessor * properties.multiProcessorCount;
if(v == cudakmc::verbose)
std::cout<<"Maximum no. threads allowed:"<<nOfDev<<" for "<<properties.name<<std::endl;
return nOfDev;
}
}
|
1b440cce03d94c0dc381e3aa361aafa5e9ae4beb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUMROWS 8
#define NUMCOLS 8
#define idx(u, y, x) (u[y * NUMCOLS + x])
float* newArray(int rows, int cols) {
float* a = (float*)malloc(NUMROWS * NUMCOLS * sizeof(float));
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
idx(a, i, j) = i*cols+j;
}
}
return a;
}
void printArray(float* a, int rows, int cols) {
for (int i=0; i<rows; i++) {
for (int j=0; j<cols; j++) {
printf("%.2f ", *(a + i*cols + j));
}
printf("\n");
}
printf("\n\n\n");
}
void matmul_host(float* a, float* b, float* c, int r1, int c1, int c2) {
for (int i = 0; i < r1; i++) {
for (int j = 0; j < c2; j++) {
float comp = 0.;
for (int k = 0; k < c1; k++) {
comp += a[i*c1+k] * b[k*c1+j];
}
idx(c, i, j) = comp;
}
}
}
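// Note: the 'matmul' kernel below is never launched from main(); after its loop it only
// writes two debug values into c. The gpu_matrix_mult kernel further down does the real work.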
__global__
void matmul(float* a, float* b, float* c, int r1, int c1, int c2) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i<r1 && j<c2) {
float comp = 0.;
for (int k = 0; k < c1; k++) {
comp += a[i*c1+k] * b[k*c1+j];
}
c[0] = 100;
c[1] = 200;
}
}
__global__ void gpu_matrix_mult(float *a,float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
}
int main(int argc, char** args) {
float* a = newArray(NUMROWS, NUMCOLS);
float* b = newArray(NUMROWS, NUMCOLS);
float* c = (float *) malloc(NUMROWS*NUMCOLS*sizeof(float));
float *d_x, *d_y, *d_z;
// hipMalloc allocates plain device memory here;
// data is moved explicitly between host and device with hipMemcpy below
hipMalloc((void **)&d_x, NUMROWS*NUMCOLS*sizeof(float));
hipMalloc((void **)&d_y, NUMROWS*NUMCOLS*sizeof(float));
hipMalloc((void **)&d_z, NUMROWS*NUMCOLS*sizeof(float));
clock_t begin = clock();
hipMemcpy(d_x, a, NUMROWS*NUMCOLS*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, b, NUMROWS*NUMCOLS*sizeof(float), hipMemcpyHostToDevice);
int threads = 32;
dim3 dim_grid((NUMROWS+31)/threads, (NUMCOLS+31)/threads, 1);
dim3 dim_block(threads, threads, 1);
hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dim_grid), dim3(dim_block), 0, 0, d_x, d_y, d_z, NUMROWS, NUMCOLS, NUMCOLS);
hipMemcpy(c, d_z, NUMROWS*NUMCOLS*sizeof(float), hipMemcpyDeviceToHost);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Elapsed: %f seconds\n", time_spent);
printArray(c, NUMROWS, NUMCOLS);
begin = clock();
matmul_host(a, b, c, NUMROWS, NUMCOLS, NUMCOLS);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Elapsed: %f seconds\n", time_spent);
printArray(c, NUMROWS, NUMCOLS);
// Free memory
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
free(a); free(b); free(c);
}
| 1b440cce03d94c0dc381e3aa361aafa5e9ae4beb.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUMROWS 8
#define NUMCOLS 8
#define idx(u, y, x) (u[y * NUMCOLS + x])
float* newArray(int rows, int cols) {
float* a = (float*)malloc(NUMROWS * NUMCOLS * sizeof(float));
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
idx(a, i, j) = i*cols+j;
}
}
return a;
}
void printArray(float* a, int rows, int cols) {
for (int i=0; i<rows; i++) {
for (int j=0; j<cols; j++) {
printf("%.2f ", *(a + i*cols + j));
}
printf("\n");
}
printf("\n\n\n");
}
void matmul_host(float* a, float* b, float* c, int r1, int c1, int c2) {
for (int i = 0; i < r1; i++) {
for (int j = 0; j < c2; j++) {
float comp = 0.;
for (int k = 0; k < c1; k++) {
comp += a[i*c1+k] * b[k*c1+j];
}
idx(c, i, j) = comp;
}
}
}
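// Note: the 'matmul' kernel below is never launched from main(); after its loop it only
// writes two debug values into c. The gpu_matrix_mult kernel further down does the real work.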
__global__
void matmul(float* a, float* b, float* c, int r1, int c1, int c2) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i<r1 && j<c2) {
float comp = 0.;
for (int k = 0; k < c1; k++) {
comp += a[i*c1+k] * b[k*c1+j];
}
c[0] = 100;
c[1] = 200;
}
}
__global__ void gpu_matrix_mult(float *a,float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
}
int main(int argc, char** args) {
float* a = newArray(NUMROWS, NUMCOLS);
float* b = newArray(NUMROWS, NUMCOLS);
float* c = (float *) malloc(NUMROWS*NUMCOLS*sizeof(float));
float *d_x, *d_y, *d_z;
// cudaMalloc allocates plain device memory here;
// data is moved explicitly between host and device with cudaMemcpy below
cudaMalloc((void **)&d_x, NUMROWS*NUMCOLS*sizeof(float));
cudaMalloc((void **)&d_y, NUMROWS*NUMCOLS*sizeof(float));
cudaMalloc((void **)&d_z, NUMROWS*NUMCOLS*sizeof(float));
clock_t begin = clock();
cudaMemcpy(d_x, a, NUMROWS*NUMCOLS*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, b, NUMROWS*NUMCOLS*sizeof(float), cudaMemcpyHostToDevice);
int threads = 32;
dim3 dim_grid((NUMROWS+31)/threads, (NUMCOLS+31)/threads, 1);
dim3 dim_block(threads, threads, 1);
gpu_matrix_mult<<<dim_grid, dim_block>>>(d_x, d_y, d_z, NUMROWS, NUMCOLS, NUMCOLS);
cudaMemcpy(c, d_z, NUMROWS*NUMCOLS*sizeof(float), cudaMemcpyDeviceToHost);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Elapsed: %f seconds\n", time_spent);
printArray(c, NUMROWS, NUMCOLS);
begin = clock();
matmul_host(a, b, c, NUMROWS, NUMCOLS, NUMCOLS);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Elapsed: %f seconds\n", time_spent);
printArray(c, NUMROWS, NUMCOLS);
// Free memory
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
free(a); free(b); free(c);
}
|
3e9bbf7704494c1718b9ae14eabb162194a3f2a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "latte/util/math_functions.h"
#include "latte/common.h"
#include "latte/util/device_alternate.h"
namespace latte {
void latte_gpu_memcpy(const size_t N, const void *X, void *Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));
}
}
void latte_gpu_memset(const size_t N, const int alpha, void *X) {
CUDA_CHECK(hipMemset(X, alpha, N)); // NOLINT
}
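// hipBLAS (like cuBLAS) assumes column-major storage; the row-major product C = A*B is
// obtained by computing C^T = B^T * A^T, which is why B is passed before A below with
// the M/N dimensions swapped.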
template <>
void latte_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M,
const int N, int K, const float alpha,
const float *A, const float *B, const float beta,
float *C) {
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Latte::cublas_handle(), cuTransB, cuTransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void latte_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M,
const int N, const int K, const double alpha,
const double *A, const double *B, const double beta,
double *C) {
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Latte::cublas_handle(), cuTransB, cuTransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void latte_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float *A,
const float *x, const float beta, float *y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Latte::cublas_handle(), cuTransA, N, M, &alpha, A, N,
x, 1, &beta, y, 1));
}
template <>
void latte_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double *A,
const double *x, const double beta, double *y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Latte::cublas_handle(), cuTransA, N, M, &alpha, A, N,
x, 1, &beta, y, 1));
}
template <>
void latte_gpu_axpy<float>(const int N, const float alpha, const float *X,
float *Y) {
CUBLAS_CHECK(hipblasSaxpy(Latte::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void latte_gpu_axpy<double>(const int N, const double alpha, const double *X,
double *Y) {
CUBLAS_CHECK(hipblasDaxpy(Latte::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void latte_gpu_dot<float>(const int n, const float *x, const float *y,
float *out) {
CUBLAS_CHECK(hipblasSdot(Latte::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void latte_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Latte::cublas_handle(), N, &alpha, X, 1));
}
template <>
void latte_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Latte::cublas_handle(), N, &alpha, X, 1));
}
template <>
void latte_gpu_axpby<float>(const int N, const float alpha, const float *X,
const float beta, float *Y) {
latte_gpu_scal<float>(N, beta, Y);
latte_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void latte_gpu_axpby<double>(const int N, const double alpha, const double *X,
const double beta, double *Y) {
latte_gpu_scal<double>(N, beta, Y);
latte_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void latte_gpu_dot<double>(const int n, const double *x, const double *y,
double *out) {
CUBLAS_CHECK(hipblasDdot(Latte::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void latte_gpu_asum<float>(const int n, const float *x, float *y) {
CUBLAS_CHECK(hipblasSasum(Latte::cublas_handle(), n, x, 1, y));
}
template <>
void latte_gpu_asum<double>(const int n, const double *x, double *y) {
CUBLAS_CHECK(hipblasDasum(Latte::cublas_handle(), n, x, 1, y));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void latte_gpu_set(const int N, const Dtype alpha, Dtype *Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N));
return;
}
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(LATTE_GET_BLOCKS(N)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void latte_gpu_set<int>(const int N, const int alpha, int *Y);
template void latte_gpu_set<float>(const int N, const float alpha, float *Y);
template void latte_gpu_set<double>(const int N, const double alpha, double *Y);
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <typename Dtype>
void latte_gpu_add(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
hipLaunchKernelGGL(( add_kernel<Dtype>), dim3(LATTE_GET_BLOCKS(N)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0, N, a, b, y);
}
template void latte_gpu_add<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_add<double>(const int N, const double *a, const double *b, double *y);
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <typename Dtype>
void latte_gpu_sub(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
hipLaunchKernelGGL(( sub_kernel<Dtype>), dim3(LATTE_GET_BLOCKS(N)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0, N, a, b, y);
}
template void latte_gpu_sub<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_sub<double>(const int N, const double *a, const double *b, double *y);
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <typename Dtype>
void latte_gpu_mul(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
hipLaunchKernelGGL(( mul_kernel<Dtype>), dim3(LATTE_GET_BLOCKS(N)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0, N, a, b, y);
}
template void latte_gpu_mul<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_mul<double>(const int N, const double *a, const double *b, double *y);
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <typename Dtype>
void latte_gpu_div(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
hipLaunchKernelGGL(( div_kernel<Dtype>), dim3(LATTE_GET_BLOCKS(N)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0, N, a, b, y);
}
template void latte_gpu_div<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_div<double>(const int N, const double *a, const double *b, double *y);
} // namespace latte | 3e9bbf7704494c1718b9ae14eabb162194a3f2a8.cu | #include <cuda_runtime.h>
#include "latte/util/math_functions.h"
#include "latte/common.h"
#include "latte/util/device_alternate.h"
namespace latte {
void latte_gpu_memcpy(const size_t N, const void *X, void *Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault));
}
}
void latte_gpu_memset(const size_t N, const int alpha, void *X) {
CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT
}
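// cuBLAS assumes column-major storage; the row-major product C = A*B is obtained by
// computing C^T = B^T * A^T, which is why B is passed before A below with the M/N
// dimensions swapped.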
template <>
void latte_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M,
const int N, int K, const float alpha,
const float *A, const float *B, const float beta,
float *C) {
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Latte::cublas_handle(), cuTransB, cuTransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void latte_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M,
const int N, const int K, const double alpha,
const double *A, const double *B, const double beta,
double *C) {
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Latte::cublas_handle(), cuTransB, cuTransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void latte_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float *A,
const float *x, const float beta, float *y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Latte::cublas_handle(), cuTransA, N, M, &alpha, A, N,
x, 1, &beta, y, 1));
}
template <>
void latte_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double *A,
const double *x, const double beta, double *y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Latte::cublas_handle(), cuTransA, N, M, &alpha, A, N,
x, 1, &beta, y, 1));
}
template <>
void latte_gpu_axpy<float>(const int N, const float alpha, const float *X,
float *Y) {
CUBLAS_CHECK(cublasSaxpy(Latte::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void latte_gpu_axpy<double>(const int N, const double alpha, const double *X,
double *Y) {
CUBLAS_CHECK(cublasDaxpy(Latte::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void latte_gpu_dot<float>(const int n, const float *x, const float *y,
float *out) {
CUBLAS_CHECK(cublasSdot(Latte::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void latte_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Latte::cublas_handle(), N, &alpha, X, 1));
}
template <>
void latte_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Latte::cublas_handle(), N, &alpha, X, 1));
}
template <>
void latte_gpu_axpby<float>(const int N, const float alpha, const float *X,
const float beta, float *Y) {
latte_gpu_scal<float>(N, beta, Y);
latte_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void latte_gpu_axpby<double>(const int N, const double alpha, const double *X,
const double beta, double *Y) {
latte_gpu_scal<double>(N, beta, Y);
latte_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void latte_gpu_dot<double>(const int n, const double *x, const double *y,
double *out) {
CUBLAS_CHECK(cublasDdot(Latte::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void latte_gpu_asum<float>(const int n, const float *x, float *y) {
CUBLAS_CHECK(cublasSasum(Latte::cublas_handle(), n, x, 1, y));
}
template <>
void latte_gpu_asum<double>(const int n, const double *x, double *y) {
CUBLAS_CHECK(cublasDasum(Latte::cublas_handle(), n, x, 1, y));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void latte_gpu_set(const int N, const Dtype alpha, Dtype *Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N));
return;
}
set_kernel<Dtype><<<LATTE_GET_BLOCKS(N), LATTE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void latte_gpu_set<int>(const int N, const int alpha, int *Y);
template void latte_gpu_set<float>(const int N, const float alpha, float *Y);
template void latte_gpu_set<double>(const int N, const double alpha, double *Y);
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <typename Dtype>
void latte_gpu_add(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
add_kernel<Dtype><<<LATTE_GET_BLOCKS(N), LATTE_CUDA_NUM_THREADS>>>(N, a, b, y);
}
template void latte_gpu_add<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_add<double>(const int N, const double *a, const double *b, double *y);
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <typename Dtype>
void latte_gpu_sub(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
sub_kernel<Dtype><<<LATTE_GET_BLOCKS(N), LATTE_CUDA_NUM_THREADS>>>(N, a, b, y);
}
template void latte_gpu_sub<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_sub<double>(const int N, const double *a, const double *b, double *y);
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <typename Dtype>
void latte_gpu_mul(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
mul_kernel<Dtype><<<LATTE_GET_BLOCKS(N), LATTE_CUDA_NUM_THREADS>>>(N, a, b, y);
}
template void latte_gpu_mul<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_mul<double>(const int N, const double *a, const double *b, double *y);
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype *a,
const Dtype *b, Dtype *y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <typename Dtype>
void latte_gpu_div(const int N, const Dtype *a, const Dtype *b, Dtype *y) {
div_kernel<Dtype><<<LATTE_GET_BLOCKS(N), LATTE_CUDA_NUM_THREADS>>>(N, a, b, y);
}
template void latte_gpu_div<float>(const int N, const float *a, const float *b, float *y);
template void latte_gpu_div<double>(const int N, const double *a, const double *b, double *y);
} // namespace latte |
0b0351d8a3386e33c47d06f8c1d774653a3f2500.hip | // !!! This is a file automatically generated by hipify!!!
// Solve the Laplace equation on a 2D lattice with boundary conditions.
//
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
// field variables
float* h_new; // host field vectors
float* h_old;
float* h_C; // result of diff*diff of each block
float* g_new;
float* d_new; // device field vectors
float* d_old;
float* d_C;
int MAX=1000000; // maximum iterations
double eps=1.0e-10; // stopping criterion
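// Jacobi-style update kernel: each thread refreshes one interior lattice site from its
// four neighbours, reading from one buffer and writing to the other (selected by 'flag');
// the squared per-site residuals are then reduced within each block into C[].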
__global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag)
{
extern __shared__ float cache[];
float t, l, r, b; // top, left, right, bottom
float diff;
int site, ym1, xm1, xp1, yp1;
int Nx = blockDim.x*gridDim.x;
int Ny = blockDim.y*gridDim.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x;
site = x + y*Nx;
if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) ) {
diff = 0.0;
}
else {
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
if(flag) {
b = phi_old[ym1];
l = phi_old[xm1];
r = phi_old[xp1];
t = phi_old[yp1];
phi_new[site] = 0.25*(b+l+r+t);
}
else {
b = phi_new[ym1];
l = phi_new[xm1];
r = phi_new[xp1];
t = phi_new[yp1];
phi_old[site] = 0.25*(b+l+r+t);
}
diff = phi_new[site]-phi_old[site];
}
cache[cacheIndex]=diff*diff;
__syncthreads();
// perform parallel reduction
int ib = blockDim.x*blockDim.y/2;
while (ib != 0) {
if(cacheIndex < ib)
cache[cacheIndex] += cache[cacheIndex + ib];
__syncthreads();
ib /=2;
}
int blockIndex = blockIdx.x + gridDim.x*blockIdx.y;
if(cacheIndex == 0) C[blockIndex] = cache[0];
}
int op(int gid, int CPU, int Nx, int Ny, int tx, int ty, std::ofstream& myfile)
{
int iter;
volatile bool flag; // to toggle between *_new and *_old
float cputime = 0.0f;
float gputime = 0.0f;
float gputime_tot = 0.0f; // initialized in case only the CPU path runs
double flops;
double error;
dim3 threads(tx,ty);
// The total number of threads in the grid is equal to the total number of lattice sites
int bx = Nx/tx;
if(bx*tx != Nx) {
printf("The block size in x is incorrect\n");
exit(0);
}
int by = Ny/ty;
if(by*ty != Ny) {
printf("The block size in y is incorrect\n");
exit(0);
}
if((bx > 65535)||(by > 65535)) {
printf("The grid size exceeds the limit ! \n");
exit(0);
}
dim3 blocks(bx,by);
printf("The dimension of the grid is (%d, %d)\n",bx,by);
// Allocate field vector h_phi in host memory
int N = Nx*Ny;
int size = N*sizeof(float);
int sb = bx*by*sizeof(float);
h_old = (float*)malloc(size);
h_new = (float*)malloc(size);
g_new = (float*)malloc(size);
h_C = (float*)malloc(sb);
memset(h_old, 0, size);
memset(h_new, 0, size);
// Initialize the field vector with boundary conditions
for(int x=0; x<Nx; x++) {
h_new[x+Nx*(Ny-1)]=1.0;
h_old[x+Nx*(Ny-1)]=1.0;
}
FILE *out1; // save initial configuration in phi_initial.dat
out1 = fopen("phi_initial.dat","w");
fprintf(out1, "Inital field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(out1,"%.2e ",h_new[i+j*Nx]);
}
fprintf(out1,"\n");
}
fclose(out1);
// printf("\n");
// printf("Inital field configuration:\n");
// for(int j=Ny-1;j>-1;j--) {
// for(int i=0; i<Nx; i++) {
// printf("%.2e ",h_new[i+j*Nx]);
// }
// printf("\n");
// }
printf("\n");
// create the timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
if(CPU>0) {
// start the timer
hipEventRecord(start,0);
// Allocate vectors in device memory
hipMalloc((void**)&d_new, size);
hipMalloc((void**)&d_old, size);
hipMalloc((void**)&d_C, sb);
// Copy vectors from host memory to device memory
hipMemcpy(d_new, h_new, size, hipMemcpyHostToDevice);
hipMemcpy(d_old, h_old, size, hipMemcpyHostToDevice);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float Intime;
hipEventElapsedTime( &Intime, start, stop);
printf("Input time for GPU: %f (ms) \n",Intime);
// start the timer
hipEventRecord(start,0);
error = 10*eps; // any value bigger than eps is OK
iter = 0; // counter for iterations
flag = true;
int sm = tx*ty*sizeof(float); // size of the shared memory in each block
while ( (error > eps) && (iter < MAX) ) {
hipLaunchKernelGGL(( laplacian), dim3(blocks),dim3(threads),sm, 0, d_old, d_new, d_C, flag);
hipMemcpy(h_C, d_C, sb, hipMemcpyDeviceToHost);
error = 0.0;
for(int i=0; i<bx*by; i++) {
error = error + h_C[i];
}
error = sqrt(error);
// printf("error = %.15e\n",error);
// printf("iteration = %d\n",iter);
iter++;
flag = !flag;
}
printf("error (GPU) = %.15e\n",error);
printf("total iterations (GPU) = %d\n",iter);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
flops = 7.0*(Nx-2)*(Ny-2)*iter;
printf("GPU Gflops: %f\n",flops/(1000000.0*gputime));
// Copy result from device memory to host memory
// start the timer
hipEventRecord(start,0);
hipMemcpy(g_new, d_new, size, hipMemcpyDeviceToHost);
hipFree(d_new);
hipFree(d_old);
hipFree(d_C);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float Outime;
hipEventElapsedTime( &Outime, start, stop);
printf("Output time for GPU: %f (ms) \n",Outime);
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n",gputime_tot);
fflush(stdout);
FILE *outg; // save GPU solution in phi_GPU.dat
outg = fopen("phi_GPU.dat","w");
fprintf(outg, "GPU field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(outg,"%.2e ",g_new[i+j*Nx]);
}
fprintf(outg,"\n");
}
fclose(outg);
// printf("\n");
// printf("Final field configuration (GPU):\n");
// for(int j=Ny-1;j>-1;j--) {
// for(int i=0; i<Nx; i++) {
// printf("%.2e ",g_new[i+j*Nx]);
// }
// printf("\n");
// }
printf("\n");
}
if(CPU==1) { // not to compute the CPU solution
free(h_new);
free(h_old);
free(g_new);
free(h_C);
hipDeviceReset();
exit(0);
}
if((CPU==0)||(CPU==2)) { // to compute the CPU solution
// start the timer
hipEventRecord(start,0);
// to compute the reference solution
error = 10*eps; // any value bigger than eps
iter = 0; // counter for iterations
flag = true;
double diff;
float t, l, r, b; // top, left, right, bottom
int site, ym1, xm1, xp1, yp1;
while ( (error > eps) && (iter < MAX) ) {
if(flag) {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_old[ym1];
l = h_old[xm1];
r = h_old[xp1];
t = h_old[yp1];
h_new[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
else {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_new[ym1];
l = h_new[xm1];
r = h_new[xp1];
t = h_new[yp1];
h_old[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
flag = !flag;
iter++;
error = sqrt(error);
// printf("error = %.15e\n",error);
// printf("iteration = %d\n",iter);
} // exit if error < eps
printf("error (CPU) = %.15e\n",error);
printf("total iterations (CPU) = %d\n",iter);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
flops = 7.0*(Nx-2)*(Ny-2)*iter;
printf("CPU Gflops: %lf\n",flops/(1000000.0*cputime));
printf("Speed up of GPU = %f\n", cputime/(gputime_tot));
fflush(stdout);
myfile<<Nx<<"x"<<Ny<<","<<tx<<"x"<<ty<<","<<gputime<<","<<gputime_tot<<","<<error<<","<<cputime<<","<<(cputime/gputime_tot)<<std::endl;
// destroy the timer
hipEventDestroy(start);
hipEventDestroy(stop);
FILE *outc; // save CPU solution in phi_CPU.dat
outc = fopen("phi_CPU.dat","w");
fprintf(outc, "CPU field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(outc,"%.2e ",h_new[i+j*Nx]);
}
fprintf(outc,"\n");
}
fclose(outc);
// printf("\n");
// printf("Final field configuration (CPU):\n");
// for(int j=Ny-1;j>-1;j--) {
// for(int i=0; i<Nx; i++) {
// printf("%.2e ",h_new[i+j*Nx]);
// }
// printf("\n");
// }
printf("\n");
free(h_new);
free(h_old);
free(g_new);
free(h_C);
}
hipDeviceReset();
}
int main(void)
{
int gid; // GPU_ID
std::ofstream myfile;
myfile.open("Output.csv");
myfile<<"lattice_sizes"<<","<<"block sizes"<<","<<"gputime"<<","<<"gputime_tot"<<","<<"diff"<<","<<"cputime"<<","<<"savetime"<<std::endl;
printf("Enter the GPU ID (0/1): ");
scanf("%d",&gid);
printf("%d\n",gid);
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
err = hipSetDevice(gid);
if (err != hipSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Select GPU with device ID = %d\n", gid);
hipSetDevice(gid);
printf("Solve Laplace equation on a 2D lattice with boundary conditions\n");
/*
int Nx,Ny; // lattice size
printf("Enter the size (Nx, Ny) of the 2D lattice: ");
scanf("%d %d",&Nx,&Ny);
printf("%d %d\n",Nx,Ny);
// Set the number of threads (tx,ty) per block
int tx,ty;
printf("Enter the number of threads (tx,ty) per block: ");
scanf("%d %d",&tx, &ty);
printf("%d %d\n",tx, ty);
if( tx*ty > 1024 ) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(0);
}
*/
int CPU;
printf("To compute the solution vector with CPU/GPU/both (0/1/2) ? ");
scanf("%d",&CPU);
printf("%d\n",CPU);
fflush(stdout);
int Ns[4] = {32, 64, 128, 256};
int ts[4] = {4, 8, 16, 32};
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%d %d\n",i, j);
op(gid, CPU, Ns[i], Ns[i], ts[j], ts[j], myfile);
}
}
}
| 0b0351d8a3386e33c47d06f8c1d774653a3f2500.cu | // Solve the Laplace equation on a 2D lattice with boundary conditions.
//
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
// field variables
float* h_new; // host field vectors
float* h_old;
float* h_C; // result of diff*diff of each block
float* g_new;
float* d_new; // device field vectors
float* d_old;
float* d_C;
int MAX=1000000; // maximum iterations
double eps=1.0e-10; // stopping criterion
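// Jacobi-style update kernel: each thread refreshes one interior lattice site from its
// four neighbours, reading from one buffer and writing to the other (selected by 'flag');
// the squared per-site residuals are then reduced within each block into C[].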
__global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag)
{
extern __shared__ float cache[];
float t, l, r, b; // top, left, right, bottom
float diff;
int site, ym1, xm1, xp1, yp1;
int Nx = blockDim.x*gridDim.x;
int Ny = blockDim.y*gridDim.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x;
site = x + y*Nx;
if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) ) {
diff = 0.0;
}
else {
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
if(flag) {
b = phi_old[ym1];
l = phi_old[xm1];
r = phi_old[xp1];
t = phi_old[yp1];
phi_new[site] = 0.25*(b+l+r+t);
}
else {
b = phi_new[ym1];
l = phi_new[xm1];
r = phi_new[xp1];
t = phi_new[yp1];
phi_old[site] = 0.25*(b+l+r+t);
}
diff = phi_new[site]-phi_old[site];
}
cache[cacheIndex]=diff*diff;
__syncthreads();
// perform parallel reduction
int ib = blockDim.x*blockDim.y/2;
while (ib != 0) {
if(cacheIndex < ib)
cache[cacheIndex] += cache[cacheIndex + ib];
__syncthreads();
ib /=2;
}
int blockIndex = blockIdx.x + gridDim.x*blockIdx.y;
if(cacheIndex == 0) C[blockIndex] = cache[0];
}
int op(int gid, int CPU, int Nx, int Ny, int tx, int ty, std::ofstream& myfile)
{
int iter;
volatile bool flag; // to toggle between *_new and *_old
float cputime = 0.0f;
float gputime = 0.0f;
float gputime_tot = 0.0f; // initialized in case only the CPU path runs
double flops;
double error;
dim3 threads(tx,ty);
// The total number of threads in the grid is equal to the total number of lattice sites
int bx = Nx/tx;
if(bx*tx != Nx) {
printf("The block size in x is incorrect\n");
exit(0);
}
int by = Ny/ty;
if(by*ty != Ny) {
printf("The block size in y is incorrect\n");
exit(0);
}
if((bx > 65535)||(by > 65535)) {
printf("The grid size exceeds the limit ! \n");
exit(0);
}
dim3 blocks(bx,by);
printf("The dimension of the grid is (%d, %d)\n",bx,by);
// Allocate field vector h_phi in host memory
int N = Nx*Ny;
int size = N*sizeof(float);
int sb = bx*by*sizeof(float);
h_old = (float*)malloc(size);
h_new = (float*)malloc(size);
g_new = (float*)malloc(size);
h_C = (float*)malloc(sb);
memset(h_old, 0, size);
memset(h_new, 0, size);
// Initialize the field vector with boundary conditions
for(int x=0; x<Nx; x++) {
h_new[x+Nx*(Ny-1)]=1.0;
h_old[x+Nx*(Ny-1)]=1.0;
}
FILE *out1; // save initial configuration in phi_initial.dat
out1 = fopen("phi_initial.dat","w");
fprintf(out1, "Inital field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(out1,"%.2e ",h_new[i+j*Nx]);
}
fprintf(out1,"\n");
}
fclose(out1);
// printf("\n");
// printf("Inital field configuration:\n");
// for(int j=Ny-1;j>-1;j--) {
// for(int i=0; i<Nx; i++) {
// printf("%.2e ",h_new[i+j*Nx]);
// }
// printf("\n");
// }
printf("\n");
// create the timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(CPU>0) {
// start the timer
cudaEventRecord(start,0);
// Allocate vectors in device memory
cudaMalloc((void**)&d_new, size);
cudaMalloc((void**)&d_old, size);
cudaMalloc((void**)&d_C, sb);
// Copy vectors from host memory to device memory
cudaMemcpy(d_new, h_new, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_old, h_old, size, cudaMemcpyHostToDevice);
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float Intime;
cudaEventElapsedTime( &Intime, start, stop);
printf("Input time for GPU: %f (ms) \n",Intime);
// start the timer
cudaEventRecord(start,0);
error = 10*eps; // any value bigger than eps is OK
iter = 0; // counter for iterations
flag = true;
int sm = tx*ty*sizeof(float); // size of the shared memory in each block
while ( (error > eps) && (iter < MAX) ) {
laplacian<<<blocks,threads,sm>>>(d_old, d_new, d_C, flag);
cudaMemcpy(h_C, d_C, sb, cudaMemcpyDeviceToHost);
error = 0.0;
for(int i=0; i<bx*by; i++) {
error = error + h_C[i];
}
error = sqrt(error);
// printf("error = %.15e\n",error);
// printf("iteration = %d\n",iter);
iter++;
flag = !flag;
}
printf("error (GPU) = %.15e\n",error);
printf("total iterations (GPU) = %d\n",iter);
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
flops = 7.0*(Nx-2)*(Ny-2)*iter;
printf("GPU Gflops: %f\n",flops/(1000000.0*gputime));
// Copy result from device memory to host memory
// start the timer
cudaEventRecord(start,0);
cudaMemcpy(g_new, d_new, size, cudaMemcpyDeviceToHost);
cudaFree(d_new);
cudaFree(d_old);
cudaFree(d_C);
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float Outime;
cudaEventElapsedTime( &Outime, start, stop);
printf("Output time for GPU: %f (ms) \n",Outime);
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n",gputime_tot);
fflush(stdout);
FILE *outg; // save GPU solution in phi_GPU.dat
outg = fopen("phi_GPU.dat","w");
fprintf(outg, "GPU field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(outg,"%.2e ",g_new[i+j*Nx]);
}
fprintf(outg,"\n");
}
fclose(outg);
// printf("\n");
// printf("Final field configuration (GPU):\n");
// for(int j=Ny-1;j>-1;j--) {
// for(int i=0; i<Nx; i++) {
// printf("%.2e ",g_new[i+j*Nx]);
// }
// printf("\n");
// }
printf("\n");
}
if(CPU==1) { // not to compute the CPU solution
free(h_new);
free(h_old);
free(g_new);
free(h_C);
cudaDeviceReset();
exit(0);
}
if((CPU==0)||(CPU==2)) { // to compute the CPU solution
// start the timer
cudaEventRecord(start,0);
// to compute the reference solution
error = 10*eps; // any value bigger than eps
iter = 0; // counter for iterations
flag = true;
double diff;
float t, l, r, b; // top, left, right, bottom
int site, ym1, xm1, xp1, yp1;
while ( (error > eps) && (iter < MAX) ) {
if(flag) {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_old[ym1];
l = h_old[xm1];
r = h_old[xp1];
t = h_old[yp1];
h_new[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
else {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_new[ym1];
l = h_new[xm1];
r = h_new[xp1];
t = h_new[yp1];
h_old[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
flag = !flag;
iter++;
error = sqrt(error);
// printf("error = %.15e\n",error);
// printf("iteration = %d\n",iter);
} // exit if error < eps
printf("error (CPU) = %.15e\n",error);
printf("total iterations (CPU) = %d\n",iter);
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
flops = 7.0*(Nx-2)*(Ny-2)*iter;
printf("CPU Gflops: %lf\n",flops/(1000000.0*cputime));
printf("Speed up of GPU = %f\n", cputime/(gputime_tot));
fflush(stdout);
myfile<<Nx<<"x"<<Ny<<","<<tx<<"x"<<ty<<","<<gputime<<","<<gputime_tot<<","<<error<<","<<cputime<<","<<(cputime/gputime_tot)<<std::endl;
// destroy the timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
FILE *outc; // save CPU solution in phi_CPU.dat
outc = fopen("phi_CPU.dat","w");
fprintf(outc, "CPU field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(outc,"%.2e ",h_new[i+j*Nx]);
}
fprintf(outc,"\n");
}
fclose(outc);
// printf("\n");
// printf("Final field configuration (CPU):\n");
// for(int j=Ny-1;j>-1;j--) {
// for(int i=0; i<Nx; i++) {
// printf("%.2e ",h_new[i+j*Nx]);
// }
// printf("\n");
// }
printf("\n");
free(h_new);
free(h_old);
free(g_new);
free(h_C);
}
cudaDeviceReset();
}
int main(void)
{
int gid; // GPU_ID
std::ofstream myfile;
myfile.open("Output.csv");
myfile<<"lattice_sizes"<<","<<"block sizes"<<","<<"gputime"<<","<<"gputime_tot"<<","<<"diff"<<","<<"cputime"<<","<<"savetime"<<std::endl;
printf("Enter the GPU ID (0/1): ");
scanf("%d",&gid);
printf("%d\n",gid);
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
err = cudaSetDevice(gid);
if (err != cudaSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Select GPU with device ID = %d\n", gid);
cudaSetDevice(gid);
printf("Solve Laplace equation on a 2D lattice with boundary conditions\n");
/*
int Nx,Ny; // lattice size
printf("Enter the size (Nx, Ny) of the 2D lattice: ");
scanf("%d %d",&Nx,&Ny);
printf("%d %d\n",Nx,Ny);
// Set the number of threads (tx,ty) per block
int tx,ty;
printf("Enter the number of threads (tx,ty) per block: ");
scanf("%d %d",&tx, &ty);
printf("%d %d\n",tx, ty);
if( tx*ty > 1024 ) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(0);
}
*/
int CPU;
printf("To compute the solution vector with CPU/GPU/both (0/1/2) ? ");
scanf("%d",&CPU);
printf("%d\n",CPU);
fflush(stdout);
int Ns[4] = {32, 64, 128, 256};
int ts[4] = {4, 8, 16, 32};
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%d %d\n",i, j);
op(gid, CPU, Ns[i], Ns[i], ts[j], ts[j], myfile);
}
}
}
|
781b9eeafd2dc3c78c2a3d92a841d853c2feb13e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zmconjugate.cu, normal z -> d, Mon Jun 25 18:24:25 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
__global__ void
magma_dmconjugate_kernel(
int num_rows,
magma_index_t *rowptr,
double *values )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i = rowptr[row]; i < rowptr[row+1]; i++){
values[i] = MAGMA_D_CONJ( values[i] );
}
}
}
/**
Purpose
-------
This function conjugates a matrix. For a real matrix, no value is changed.
Arguments
---------
@param[in,out]
A magma_d_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmconjugate(
magma_d_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE ));
hipLaunchKernelGGL(( magma_dmconjugate_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
A->num_rows, A->drow, A->dval );
return info;
}
| 781b9eeafd2dc3c78c2a3d92a841d853c2feb13e.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zmconjugate.cu, normal z -> d, Mon Jun 25 18:24:25 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
__global__ void
magma_dmconjugate_kernel(
int num_rows,
magma_index_t *rowptr,
double *values )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i = rowptr[row]; i < rowptr[row+1]; i++){
values[i] = MAGMA_D_CONJ( values[i] );
}
}
}
/**
Purpose
-------
This function conjugates a matrix. For a real matrix, no value is changed.
Arguments
---------
@param[in,out]
A magma_d_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmconjugate(
magma_d_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE ));
magma_dmconjugate_kernel<<< grid, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( A->num_rows, A->drow, A->dval );
return info;
}
|
8417d696cdc1cf5435d515d3093e9c8558df4a95.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <time.h>
#include <string>
#include <vector>
#include <sstream>
#include <hip/hip_runtime.h>
#include <math.h>
#include <fstream> // library for reading files
#include <typeinfo> // for 'typeid' to work
#include <tuple>
using namespace std;
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Helper functions */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
__constant__ int HitoriCM[100*100]; //FIXME: change when N and M are updated
void funcionQL(string* Hitori_Str, int* Hit_State, int N);
// Function to split a string on a delimiter
void tokenize(string const &str, const char delim, vector<string> &out) {
// construct a stream from the string
stringstream ss(str);
string s;
while (getline(ss, s, delim)) {
out.push_back(s);
}
return;
}
/*
* Printer for a 2D matrix stored linearly (row-major)
*/
void showMatrix(int *matrix, int N, int M) {
for(int j = 0; j < M; j++){
for(int i = 0; i < N; i++)
printf("%d ", matrix[i + j*N]);
printf("\n");
}
printf("\n");
}
void showMatrix(string* matrix, int N, int M) {
for(int j = 0; j < M; j++){
for(int i = 0; i < N; i++)
if(matrix[i + j*N].size() == 2) cout << matrix[i + j*N] << " ";
else if(matrix[i + j*N].size() == 1) cout << matrix[i + j*N] << " ";
printf("\n");
}
printf("\n");
}
/*
void showMatrix(string* matrix, int N, int M) {
for(int j = 0; j < M; j++){
for(int i = 0; i < N; i++)
cout << matrix[i + j*N] << " ";
printf("\n");
}
printf("\n");
}*/
void readHitoriFromFile(fstream* FILE, int* matrixH, string* matrixHstr, int N){
int i, j = 0;
const char delim = ' ';
string line;
vector<string> row;
while( getline(*FILE, line)){
tokenize(line, delim, row);
for(i = 0; i < N ; i++){
matrixHstr[j] = row[i];
matrixH[j++] = stoi(row[i]);
}
// Clear the row buffer before reading the next line
row.clear();
}
}
/*
1 -> not multiple
2 -> multiple per row
3 -> multiple per column
4 -> multiple per row and column
5 -> not paintable
6 -> painted // eliminated from the board
*/
// tuple (elem , posElem)
vector<tuple<int , int>> getRemainingMultiples(int* Hit_State, int N){
int i,j;
int elem;
int posElem;
vector<tuple<int, int>> M;
tuple<int, int> tup;
/*
1 -> not multiple
2 -> multiple per row
3 -> multiple per column
4 -> multiple per row and column
5 -> not paintable
6 -> painted // eliminated from the board
*/
for(j = 0; j < N; j++ ){
for(i = 0; i < N; i++){
posElem = i + j*N;
elem = Hit_State[posElem];
tup = make_tuple(elem,posElem);
switch(elem) {
case 2:
M.push_back(tup);
break;
case 3:
M.push_back(tup);
break;
case 4:
M.push_back(tup);
break;
default:
break;
}
}
}
return M;
}
/*
Consistency check for the Hitori board.
What this function does is check whether two multiples
in the same row or column hold the same number while
both are marked not paintable (5).
*/
bool isRule4Conform(int* Hit_State, int N){
int i;
vector<tuple<int, int>> M = getRemainingMultiples(Hit_State, N);
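// TODO: rule-4 validation is not implemented; the loop below is an empty
// placeholder, so the function currently accepts every board.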
for( i = 0; i < M.size() ; i++){
}
return true;
}
/*
Run every time a multiple is painted (6):
1. Set all cells adjacent to the painted multiple.
2.
*/
bool StandardCyclePattern(int* Hitori, int* Hit_State, int N){
// Check Rule 4 (currently disabled; this pattern always reports success):
// return isRule4Conform(Hit_State, N);
return true;
}
void copyHitoriToHitori(int* Hit_State, int* Hit_StateAux, int N){
int i, j;
for(j = 0; j < N; j++)
for( i = 0; i < N; i++)
Hit_StateAux[i + j*N] = Hit_State[i + j*N];
}
void setNotPaintable(int* Hit_State, tuple<int, int> tup ){
Hit_State[ get<1>(tup) ] = 5; // tuple is (value, position): index by the position element
}
void paint(int* Hit_State, tuple<int, int> tup){
Hit_State[ get<1>(tup)] = 6; // tuple is (value, position): index by the position element
return;
}
void setInitialHitoriState(int *Hit_State, int N) {
for(int j = 0; j < N; j++)
for(int i = 0; i < N; i++)
Hit_State[i + j*N] = 1; // 1 -> not multiple
}
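// Classify every cell: 2 = duplicate elsewhere in its row, 3 = duplicate elsewhere
// in its column, 4 = duplicate in both (cells left at 1 are unique).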
void SetHitoriState( int* Hitori, int* Hit_State, int N){
bool flag1, flag2;
for(int j = 0; j < N; j++){
for(int i = 0; i < N; i++){
flag1 = false; flag2 = false;
int posElem = i + j*N;
int elem = Hitori[posElem];
// scan the rest of the row for the same value
for(int k = j*N; k < N + j*N ; k++){
if( k == posElem )
continue;
if( Hitori[k] == elem ){
flag1 = true;
break;
}
}
// scan the rest of the column for the same value
for(int t = i; t < N*N ;t += N ){
if( t == posElem )
continue;
if( Hitori[t] == elem){
flag2 = true;
break;
}
}
if( flag1 == true && flag2 == true) // case 4 -> multiple per row and column
Hit_State[posElem] = 4;
else if( flag1 == true ) //2 -> multiple per row
Hit_State[posElem] = 2;
else if( flag2 == true) //3 -> multiple per column
Hit_State[posElem] = 3;
}
}
}
void updateHitori(string* Hitori_Str, int* Hit_State, int N){
int i, j;
for( j = 0; j < N; j++){
for( i = 0; i < N; i++){
if( Hit_State[i + j*N] == 6)
Hitori_Str[i + j*N] = "X";
}
}
return;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* CPU */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
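// Triplet (sandwich) rule, row version: if the left and right neighbours hold the
// same value as the current cell, the current cell cannot be painted (state 5).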
void tripletF(int *hitori, int* estado, int N){
int i, aux;
bool back, next;
for(i = 0; i < N*N; i++){
//int fila = i/N;
int columna = i%N;
if(columna > 0 && columna < N-1){ // both row neighbours must exist
int valor = hitori[i];
aux = estado[i];
back = (hitori[i-1] == valor)? true : false;
next = (hitori[i+1] == valor)? true : false;
estado[i] = (back && next)? 5 : aux;
}
}
}
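// Triplet (sandwich) rule, column version.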
void tripletC(int *hitori, int *estado, int N){
int i, aux;
bool up, down;
for (i = 0; i < N*N; i++){
int fila = i/N;
//int columna = i%N;
if (fila > 0 && fila < N-1){ // both column neighbours must exist
int valor = hitori[i];
aux = estado[i];
up = (hitori[i-N] == valor) ? true : false;
down = (hitori[i+N] == valor)? true : false;
estado[i] = (up && down) ? 5 : aux;
}
}
}
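// Rescue rule, row version: a cell horizontally adjacent to a painted cell
// (state 6) must stay white, so it is marked not paintable (state 5).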
void rescateF(int *hitori, int *estado, int N){
int i, aux;
bool back, next;
for (i = 0; i < N*N; i++){
//int fila = i/N;
int columna = i%N;
if (columna > 0 && columna < N-1){ // keep the neighbour accesses inside the row
// int valor = hitori[i];
aux = estado[i];
back = (estado[i-1] == 6)? true : false;
next = (estado[i+1] == 6)? true : false;
estado[i] = (back || next) ? 5 : aux;
}
}
}
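// Rescue rule, column version.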
void rescateC(int *hitori, int *estado, int N){
int i, aux;
bool up, down;
for (i = 0; i < N*N; i++){
int fila = i/N;
//int columna = i%N;
if (fila > 0 && fila < N-1){ // keep the neighbour accesses inside the column
//int valor = hitori[i];
aux = estado[i];
up = (estado[i-N] == 6)? true : false;
down = (estado[i+N] == 6)? true : false;
estado[i] = (up || down) ? 5 : aux;
}
}
}
void DobleC(int* hitori,int *estado, int N){
//int f; //Fila en que esta
int c; //Columna en la que esta
int pos;
for(int i = 0; i < N*N; i++) {
bool ant = false;
bool doble = false;
//f = i / N;
c = i % N;
int valor = hitori[i];
for(int j = 0; j < N; j++){
pos = c+N*j;
doble = (ant && i != pos && hitori[pos] == valor)? true : doble;
ant = (i != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[i] = 6;
}
}
}
void DobleF(int* hitori,int *estado, int N){
int f; //Fila en que esta
//int c; //Columna en la que esta
int pos;
for(int i = 0; i < N*N; i++) {
bool ant = false;
bool doble = false;
f = i / N;
//c = i % N;
int valor = hitori[i];
for(int j = 0; j < N; j++){
pos = f*N+j;
doble = (ant && i != pos && hitori[pos] == valor)? true : doble;
ant = (i != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[i] = 6;
}
}
}
void muerteF(int *hitori, int *estado, int N){
int i, aux1, aux2;
int pos;
for(i = 0; i < N*N; i++){
int fila = i/N;
//int columna = i%N;
int valor = hitori[i];
aux1 = estado[i];
if(aux1 != 5 && aux1 !=6){
for(int j = 0; j < N; j++){
pos = fila*N+j;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[i] = aux1;
}
}
}
void muerteC(int *hitori, int *estado, int N){
int i, aux1, aux2;
int pos;
for(i = 0; i < N*N; i++){
//int fila = i/N;
int columna = i%N;
int valor = hitori[i];
aux1 = estado[i];
if(aux1 != 5 && aux1 !=6){
for(int j = 0; j < N; j++){
pos = columna+N*j;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[i] = aux1;
}
}
}
void funcionCPU(string* Hitori_Str, int* Hitori, int* estado, int N){
int i;
// Ejecutar patrones
//printf(" - TRIPLETE - \n");
tripletF(Hitori, estado, N);
tripletC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
//printf(" - DOBLE - \n");
DobleF(Hitori, estado, N);
DobleC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
for(i = 0; i < N; i++){
//printf(" - MUERTE - \n");
muerteF(Hitori, estado, N);
muerteC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
//printf(" - RESCATE - \n");
rescateF(Hitori, estado, N);
rescateC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
}
return;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* GPU primera implementacion */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* -------------------------- Deteccion de patrones ------------------------- */
__global__ void kernelTripletF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
int valor = hitori[tId];
aux = estado[tId];
back = (hitori[tId-1] == valor)? true : false;
next = (hitori[tId+1] == valor)? true : false;
estado[tId] = (back && next) ? 5 : aux;
}
}
__global__ void kernelTripletC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
//int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
int valor = hitori[tId];
aux = estado[tId];
up = (hitori[tId-N] == valor)? true : false;
down = (hitori[tId+N] == valor)? true : false;
estado[tId] = (up && down) ? 5 : aux;
}
}
__global__ void kernelDobleF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = hitori[tId];
for(int i = 0; i < N; i++){
pos = f*N+i;
doble = (ant && tId != pos && hitori[pos] == valor)? true : doble;
ant = (tId != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
__global__ void kernelDobleC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = hitori[tId];
for(int i = 0; i < N; i++){
pos = c+N*i;
doble = (ant && tId != pos && hitori[pos] == valor)? true : doble;
ant = (tId != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
/* ---------------------------- Funciones del for --------------------------- */
__global__ void kernelRescateF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
// int valor = hitori[tId];
aux = estado[tId];
back = (estado[tId-1] == 6)? true : false;
next = (estado[tId+1] == 6)? true : false;
estado[tId] = (back || next) ? 5 : aux;
}
}
__global__ void kernelRescateC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
//int valor = hitori[tId];
aux = estado[tId];
up = (estado[tId-N] == 6)? true : false;
down = (estado[tId+N] == 6)? true : false;
estado[tId] = (up || down) ? 5 : aux;
}
}
__global__ void kernelMuerteF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
int aux1, aux2, pos;
if(tId < N*N) {
int valor = hitori[tId];
aux1 = estado[tId];
if(aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
pos = f*N+i;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
__global__ void kernelMuerteC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
int aux1, aux2, pos;
if(tId < N*N) {
int valor = hitori[tId];
aux1 = estado[tId];
if (aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
pos = c+N*i;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* GPU segunda implementacion */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
__global__ void kernelTripletF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
int valor = HitoriCM[tId];
aux = estado[tId];
back = (HitoriCM[tId-1] == valor)? true : false;
next = (HitoriCM[tId+1] == valor)? true : false;
estado[tId] = (back && next) ? 5 : aux;
}
}
__global__ void kernelTripletC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
int valor = HitoriCM[tId];
aux = estado[tId];
up = (HitoriCM[tId-N] == valor)? true : false;
down = (HitoriCM[tId+N] == valor)? true : false;
estado[tId] = (up && down) ? 5 : aux;
}
}
__global__ void kernelRescateF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
// int valor = HitoriCM[tId];
aux = estado[tId];
back = (estado[tId-1] == 6)? true : false;
next = (estado[tId+1] == 6)? true : false;
estado[tId] = (back || next) ? 5 : aux;
}
}
__global__ void kernelRescateC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
// int valor = HitoriCM[tId];
aux = estado[tId];
up = (estado[tId-N] == 6)? true : false;
down = (estado[tId+N] == 6)? true : false;
estado[tId] = (up || down) ? 5 : aux;
}
}
__global__ void kernelDobleC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = HitoriCM[tId];
for(int i = 0; i < N; i++){
pos = c+N*i;
doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble;
ant = (tId != pos && HitoriCM[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
__global__ void kernelDobleF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = HitoriCM[tId];
for(int i = 0; i < N; i++){
pos = f*N+i;
doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble;
ant = (tId != pos && HitoriCM[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
__global__ void kernelMuerteF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
int aux1, aux2, pos;
if(tId < N*N) {
int valor = HitoriCM[tId];
aux1 = estado[tId];
if(aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
pos = f*N+i;
aux2 = HitoriCM[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
__global__ void kernelMuerteC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
int aux1, aux2;
if(tId < N*N) {
int valor = HitoriCM[tId];
aux1 = estado[tId];
if (aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
aux2 = HitoriCM[c+N*i];
if(valor == aux2){
aux1 = (estado[c+N*i] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
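/*
   Note on the second implementation (the _CM kernels): the puzzle values are
   read-only while the kernels run, so main() copies them once into the
   __constant__ array HitoriCM with hipMemcpyToSymbol and only the mutable state
   array stays in global memory. The likely motivation is the constant cache:
   threads scanning the same row or column read the same HitoriCM entries and get
   broadcast hits instead of separate global-memory loads.
*/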
void funcionQL(string* Hitori_Str, int* Hit_State, int N){
// Visualizar Hitori
updateHitori(Hitori_Str, Hit_State, N);
showMatrix(Hitori_Str, N, N);
//printf("\n Hitori Estado \n");
//showMatrix(Hit_State, N, N);
return;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Main */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
int main(int argc, char* argv[]){
fstream FILE;
int* Hitori;
string* Hitori_Str;
int* Hit_State;
int N;
string line;
vector<tuple<int, int>> M;
string nameFile = argv[1];
// Abrir el archivo en modo lectura
FILE.open(nameFile, ios::in);
if(!FILE){
cerr << "Unable to open file!" << endl;
exit(1);
}
if( FILE.is_open() ){
getline(FILE, line);
N = stoi(line);
Hitori = new int[N*N];
Hit_State = new int[N*N];
Hitori_Str = new string[N*N];
setInitialHitoriState(Hit_State, N);
readHitoriFromFile(&FILE, Hitori, Hitori_Str, N);
SetHitoriState( Hitori, Hit_State, N);
// Parte CPU
// Inicialización variables de tiempo
clock_t t1, t2;
double ms;
t1 = clock();
funcionCPU(Hitori_Str, Hitori, Hit_State, N);
t2 = clock();
ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
printf("Tiempo de CPU: %5f \n", ms);
//cout << "Tiempo CPU: " << ms << "[ms]" << endl;
funcionQL(Hitori_Str, Hit_State, N);
SetHitoriState( Hitori, Hit_State, N);
// Parte GPU 1
// Def tiempos GPU
int* HitoriDev, *Hit_StateDev;
hipEvent_t ct1, ct2;
float dt;
hipEventCreate(&ct1);
hipEventCreate(&ct2);
int block_size = 256; // múltiplo de 32
int grid_size = (int)ceil((float)(N*N)/block_size); // ceil : función techo
hipMalloc(&HitoriDev, sizeof(int)*N*N);
hipMalloc(&Hit_StateDev, sizeof(int)*N*N);
hipEventCreate(&ct1);
hipEventCreate(&ct2);
hipEventRecord(ct1);
hipMemcpy(HitoriDev, Hitori, N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(Hit_StateDev, Hit_State, N*N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelTripletF), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
hipLaunchKernelGGL(( kernelTripletC), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
hipLaunchKernelGGL(( kernelDobleF), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
hipLaunchKernelGGL(( kernelDobleC), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
for(int i = 0; i < N; i++){
hipLaunchKernelGGL(( kernelMuerteF), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
hipLaunchKernelGGL(( kernelMuerteC), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
hipLaunchKernelGGL(( kernelRescateF), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
hipLaunchKernelGGL(( kernelRescateC), dim3(grid_size), dim3(block_size), 0, 0, HitoriDev, Hit_StateDev, N);
}
hipMemcpy(Hit_State, Hit_StateDev, N*N*sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
cout << "Tiempo GPU 1: " << dt << "[ms]" << endl;
funcionQL(Hitori_Str, Hit_State, N);
SetHitoriState( Hitori, Hit_State, N);
// Parte GPU 2
int* Hit_StateDev2;
hipMalloc(&Hit_StateDev2, sizeof(int)*N*N);
hipEventRecord(ct1);
hipMemcpyToSymbol(HitoriCM, Hitori, N*N*sizeof(int), 0, hipMemcpyHostToDevice); // Para kernel CM
hipMemcpy(Hit_StateDev2, Hit_State, N*N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelTripletF_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
hipLaunchKernelGGL(( kernelTripletC_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
hipLaunchKernelGGL(( kernelDobleF_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
hipLaunchKernelGGL(( kernelDobleC_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
for(int i = 0; i < N; i++){
hipLaunchKernelGGL(( kernelMuerteF_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
hipLaunchKernelGGL(( kernelMuerteC_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
hipLaunchKernelGGL(( kernelRescateF_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
hipLaunchKernelGGL(( kernelRescateC_CM), dim3(grid_size), dim3(block_size), 0, 0, Hit_StateDev2, N);
}
hipMemcpy(Hit_State, Hit_StateDev2, N*N*sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
cout << "Tiempo GPU 2: " << dt << "[ms]" << endl;
funcionQL(Hitori_Str, Hit_State, N);
// Liberar memoria
delete[] Hitori;
delete[] Hit_State;
delete[] Hitori_Str;
//P1
hipFree(HitoriDev);
hipFree(Hit_StateDev);
//P2
hipFree(Hit_StateDev2);
}
FILE.close();
return 0;
} | 8417d696cdc1cf5435d515d3093e9c8558df4a95.cu | #include <iostream>
#include <time.h>
#include <string>
#include <vector>
#include <sstream>
#include <cuda_runtime.h>
#include <math.h>
#include <fstream> // Libreria para leer archivos
#include <typeinfo> // for 'typeid' to work
#include <tuple>
using namespace std;
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Funciones de apoyo */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
__constant__ int HitoriCM[100*100]; //FIXME: Cambiar cuando se actualice N y M
void funcionQL(string* Hitori_Str, int* Hit_State, int N);
// Función para Splitear un String
void tokenize(string const &str, const char delim, vector<string> &out) {
// construct a stream from the string
stringstream ss(str);
string s;
while (getline(ss, s, delim)) {
out.push_back(s);
}
return;
}
/*
* Impresor de Matrix 2D - Almacenada linealmente
*/
void showMatrix(int *matrix, int N, int M) {
for(int j = 0; j < M; j++){
for(int i = 0; i < N; i++)
printf("%d ", matrix[i + j*N]);
printf("\n");
}
printf("\n");
}
void showMatrix(string* matrix, int N, int M) {
for(int j = 0; j < M; j++){
for(int i = 0; i < N; i++)
if(matrix[i + j*N].size() == 2) cout << matrix[i + j*N] << " ";
else if(matrix[i + j*N].size() == 1) cout << matrix[i + j*N] << " ";
printf("\n");
}
printf("\n");
}
/*
void showMatrix(string* matrix, int N, int M) {
for(int j = 0; j < M; j++){
for(int i = 0; i < N; i++)
cout << matrix[i + j*N] << " ";
printf("\n");
}
printf("\n");
}*/
void readHitoriFromFile(fstream* FILE, int* matrixH, string* matrixHstr, int N){
int i, j = 0;
const char delim = ' ';
string line;
vector<string> row;
while( getline(*FILE, line)){
tokenize(line, delim, row);
for(i = 0; i < N ; i++){
matrixHstr[j] = row[i];
matrixH[j++] = stoi(row[i]);
}
// Limpiar el buffer de salida
row.clear();
}
}
/*
1 -> not multiple
2 -> multiple per row
3 -> multiple per column
4 -> multiple per row and column
5 -> not paintable
6 -> paintable // Eliminado
*/
// tuple (elem , posElem)
vector<tuple<int , int>> getRemainingMultiples(int* Hit_State, int N){
int i,j;
int elem;
int posElem;
vector<tuple<int, int>> M;
tuple<int, int> tup;
/*
1 -> not multiple
2 -> multiple per row
3 -> multiple per column
4 -> multiple per row and column
5 -> not paintable
6 -> paintable // Eliminado
*/
for(j = 0; j < N; j++ ){
for(i = 0; i < N; i++){
posElem = i + j*N;
elem = Hit_State[posElem];
tup = make_tuple(elem,posElem);
switch(elem) {
case 2:
M.push_back(tup);
break;
case 3:
M.push_back(tup);
break;
case 4:
M.push_back(tup);
break;
default:
break;
}
}
}
return M;
}
/*
Función para consistencia del Hitori
Lo que está función hace es mirar si dos multiples
en la misma columna o fila tienen el mismo número y si
ambos son not paintable (5).
*/
bool isRule4Conform(int* Hit_State, int N){
int i;
vector<tuple<int, int>> M = getRemainingMultiples(Hit_State, N);
for( i = 0; i < M.size() ; i++){
}
return true;
}
/*
Ejecutar cada vez que un multiplo es pintado (6)
1. Setear todas las celdas adyacentes al múltiplo pintado.
2.
*/
bool StandardCyclePattern(int* Hitori, int* Hit_State, int N){
// Comprueba Regla 4:
// return isRule4Conform(Hit_State, N);
return true;
}
void copyHitoriToHitori(int* Hit_State, int* Hit_StateAux, int N){
int i, j;
for(j = 0; j < N; j++)
for( i = 0; i < N; i++)
Hit_StateAux[i + j*N] = Hit_State[i + j*N];
}
void setNotPaintable(int* Hit_State, tuple<int, int> tup ){
Hit_State[ get<1>(tup) ] = 5;  // la tupla es (elem, posElem): se indexa por la posición, no por el valor
}
void paint(int* Hit_State, tuple<int, int> tup){
Hit_State[ get<1>(tup)] = 6;  // se indexa por la posición guardada en la tupla
return;
}
void setInitialHitoriState(int *Hit_State, int N) {
for(int j = 0; j < N; j++)
for(int i = 0; i < N; i++)
Hit_State[i + j*N] = 1; // 1 -> not multiple
}
void SetHitoriState( int* Hitori, int* Hit_State, int N){
bool flag1, flag2;
for(int j = 0; j < N; j++){
for(int i = 0; i < N; i++){
flag1 = false; flag2 = false;
int posElem = i + j*N;
int elem = Hitori[posElem];
// iterar por Fila
for(int k = j*N; k < N + j*N ; k++){
if( k == posElem )
continue;
if( Hitori[k] == elem ){
flag1 = true;
break;
}
}
// iterar por Columna
for(int t = i; t < N*N ;t += N ){
if( t == posElem )
continue;
if( Hitori[t] == elem){
flag2 = true;
break;
}
}
if( flag1 == true && flag2 == true) // case 4 -> multiple per row and column
Hit_State[posElem] = 4;
else if( flag1 == true ) //2 -> multiple per row
Hit_State[posElem] = 2;
else if( flag2 == true) //3 -> multiple per column
Hit_State[posElem] = 3;
}
}
}
void updateHitori(string* Hitori_Str, int* Hit_State, int N){
int i, j;
for( j = 0; j < N; j++){
for( i = 0; i < N; i++){
if( Hit_State[i + j*N] == 6)
Hitori_Str[i + j*N] = "X";
}
}
return;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* CPU */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
void tripletF(int *hitori, int* estado, int N){
int i, aux;
bool back, next;
for(i = 0; i < N*N; i++){
//int fila = i/N;
int columna = i%N;
if(columna > 0 && columna < N){
int valor = hitori[i];
aux = estado[i];
back = (hitori[i-1] == valor)? true : false;
next = (hitori[i+1] == valor)? true : false;
estado[i] = (back && next)? 5 : aux;
}
}
}
void tripletC(int *hitori, int *estado, int N){
int i, aux;
bool up, down;
for (i = 0; i < N*N; i++){
int fila = i/N;
//int columna = i%N;
if (fila > 0 && fila < N){
int valor = hitori[i];
aux = estado[i];
up = (hitori[i-N] == valor) ? true : false;
down = (hitori[i+N] == valor)? true : false;
estado[i] = (up && down) ? 5 : aux;
}
}
}
void rescateF(int *hitori, int *estado, int N){
int i, aux;
bool back, next;
for (i = 0; i < N*N; i++){
//int fila = i/N;
int columna = i%N;
if (columna > 0 && columna < N){
// int valor = hitori[i];
aux = estado[i];
back = (estado[i-1] == 6)? true : false;
next = (estado[i+1] == 6)? true : false;
estado[i] = (back || next) ? 5 : aux;
}
}
}
void rescateC(int *hitori, int *estado, int N){
int i, aux;
bool up, down;
for (i = 0; i < N*N; i++){
int fila = i/N;
//int columna = i%N;
if (fila > 0 && fila < N){
//int valor = hitori[i];
aux = estado[i];
up = (estado[i-N] == 6)? true : false;
down = (estado[i+N] == 6)? true : false;
estado[i] = (up || down) ? 5 : aux;
}
}
}
void DobleC(int* hitori,int *estado, int N){
//int f; //Fila en que esta
int c; //Columna en la que esta
int pos;
for(int i = 0; i < N*N; i++) {
bool ant = false;
bool doble = false;
//f = i / N;
c = i % N;
int valor = hitori[i];
for(int j = 0; j < N; j++){
pos = c+N*j;
doble = (ant && i != pos && hitori[pos] == valor)? true : doble;
ant = (i != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[i] = 6;
}
}
}
void DobleF(int* hitori,int *estado, int N){
int f; //Fila en que esta
//int c; //Columna en la que esta
int pos;
for(int i = 0; i < N*N; i++) {
bool ant = false;
bool doble = false;
f = i / N;
//c = i % N;
int valor = hitori[i];
for(int j = 0; j < N; j++){
pos = f*N+j;
doble = (ant && i != pos && hitori[pos] == valor)? true : doble;
ant = (i != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[i] = 6;
}
}
}
void muerteF(int *hitori, int *estado, int N){
int i, aux1, aux2;
int pos;
for(i = 0; i < N*N; i++){
int fila = i/N;
//int columna = i%N;
int valor = hitori[i];
aux1 = estado[i];
if(aux1 != 5 && aux1 !=6){
for(int j = 0; j < N; j++){
pos = fila*N+j;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[i] = aux1;
}
}
}
void muerteC(int *hitori, int *estado, int N){
int i, aux1, aux2;
int pos;
for(i = 0; i < N*N; i++){
//int fila = i/N;
int columna = i%N;
int valor = hitori[i];
aux1 = estado[i];
if(aux1 != 5 && aux1 !=6){
for(int j = 0; j < N; j++){
pos = columna+N*j;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[i] = aux1;
}
}
}
void funcionCPU(string* Hitori_Str, int* Hitori, int* estado, int N){
int i;
// Ejecutar patrones
//printf(" - TRIPLETE - \n");
tripletF(Hitori, estado, N);
tripletC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
//printf(" - DOBLE - \n");
DobleF(Hitori, estado, N);
DobleC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
for(i = 0; i < N; i++){
//printf(" - MUERTE - \n");
muerteF(Hitori, estado, N);
muerteC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
//printf(" - RESCATE - \n");
rescateF(Hitori, estado, N);
rescateC(Hitori, estado, N);
//funcionQL(Hitori_Str, estado, N);
}
return;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* GPU primera implementacion */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* -------------------------- Deteccion de patrones ------------------------- */
__global__ void kernelTripletF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
int valor = hitori[tId];
aux = estado[tId];
back = (hitori[tId-1] == valor)? true : false;
next = (hitori[tId+1] == valor)? true : false;
estado[tId] = (back && next) ? 5 : aux;
}
}
__global__ void kernelTripletC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
//int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
int valor = hitori[tId];
aux = estado[tId];
up = (hitori[tId-N] == valor)? true : false;
down = (hitori[tId+N] == valor)? true : false;
estado[tId] = (up && down) ? 5 : aux;
}
}
__global__ void kernelDobleF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = hitori[tId];
for(int i = 0; i < N; i++){
pos = f*N+i;
doble = (ant && tId != pos && hitori[pos] == valor)? true : doble;
ant = (tId != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
__global__ void kernelDobleC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = hitori[tId];
for(int i = 0; i < N; i++){
pos = c+N*i;
doble = (ant && tId != pos && hitori[pos] == valor)? true : doble;
ant = (tId != pos && hitori[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
/* ---------------------------- Funciones del for --------------------------- */
__global__ void kernelRescateF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
// int valor = hitori[tId];
aux = estado[tId];
back = (estado[tId-1] == 6)? true : false;
next = (estado[tId+1] == 6)? true : false;
estado[tId] = (back || next) ? 5 : aux;
}
}
__global__ void kernelRescateC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
//int valor = hitori[tId];
aux = estado[tId];
up = (estado[tId-N] == 6)? true : false;
down = (estado[tId+N] == 6)? true : false;
estado[tId] = (up || down) ? 5 : aux;
}
}
__global__ void kernelMuerteF(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
int aux1, aux2, pos;
if(tId < N*N) {
int valor = hitori[tId];
aux1 = estado[tId];
if(aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
pos = f*N+i;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
__global__ void kernelMuerteC(int *hitori, int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
//int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
int aux1, aux2, pos;
if(tId < N*N) {
int valor = hitori[tId];
aux1 = estado[tId];
if (aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
pos = c+N*i;
aux2 = hitori[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* GPU segunda implementacion */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
__global__ void kernelTripletF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
int valor = HitoriCM[tId];
aux = estado[tId];
back = (HitoriCM[tId-1] == valor)? true : false;
next = (HitoriCM[tId+1] == valor)? true : false;
estado[tId] = (back && next) ? 5 : aux;
}
}
__global__ void kernelTripletC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
int valor = HitoriCM[tId];
aux = estado[tId];
up = (HitoriCM[tId-N] == valor)? true : false;
down = (HitoriCM[tId+N] == valor)? true : false;
estado[tId] = (up && down) ? 5 : aux;
}
}
__global__ void kernelRescateF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool back, next;
int aux;
if(tId < N*N && c > 0 && c < N) {
// int valor = HitoriCM[tId];
aux = estado[tId];
back = (estado[tId-1] == 6)? true : false;
next = (estado[tId+1] == 6)? true : false;
estado[tId] = (back || next) ? 5 : aux;
}
}
__global__ void kernelRescateC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool up, down;
int aux;
if(tId < N*N && f > 0 && f < N) {
// int valor = HitoriCM[tId];
aux = estado[tId];
up = (estado[tId-N] == 6)? true : false;
down = (estado[tId+N] == 6)? true : false;
estado[tId] = (up || down) ? 5 : aux;
}
}
__global__ void kernelDobleC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = HitoriCM[tId];
for(int i = 0; i < N; i++){
pos = c+N*i;
doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble;
ant = (tId != pos && HitoriCM[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
__global__ void kernelDobleF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
bool ant = false;
bool doble = false;
int pos;
if(tId < N*N) {
int valor = HitoriCM[tId];
for(int i = 0; i < N; i++){
pos = f*N+i;
doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble;
ant = (tId != pos && HitoriCM[pos] == valor)? true : false;
}
if(doble) {
estado[tId] = 6;
}
}
}
__global__ void kernelMuerteF_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int f = tId / N; //Fila en que esta
// int c = tId % N; //Columna en la que esta
int aux1, aux2, pos;
if(tId < N*N) {
int valor = HitoriCM[tId];
aux1 = estado[tId];
if(aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
pos = f*N+i;
aux2 = HitoriCM[pos];
if(valor == aux2){
aux1 = (estado[pos] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
__global__ void kernelMuerteC_CM(int *estado, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
// int f = tId / N; //Fila en que esta
int c = tId % N; //Columna en la que esta
int aux1, aux2;
if(tId < N*N) {
int valor = HitoriCM[tId];
aux1 = estado[tId];
if (aux1 != 5 && aux1 != 6){
for(int i = 0; i < N; i++){
aux2 = HitoriCM[c+N*i];
if(valor == aux2){
aux1 = (estado[c+N*i] == 5)? 6 : aux1;
}
}
estado[tId] = aux1;
}
}
}
void funcionQL(string* Hitori_Str, int* Hit_State, int N){
// Visualizar Hitori
updateHitori(Hitori_Str, Hit_State, N);
showMatrix(Hitori_Str, N, N);
//printf("\n Hitori Estado \n");
//showMatrix(Hit_State, N, N);
return;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Main */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
int main(int argc, char* argv[]){
fstream FILE;
int* Hitori;
string* Hitori_Str;
int* Hit_State;
int N;
string line;
vector<tuple<int, int>> M;
string nameFile = argv[1];
// Abrir el archivo en modo lectura
FILE.open(nameFile, ios::in);
if(!FILE){
cerr << "Unable to open file!" << endl;
exit(1);
}
if( FILE.is_open() ){
getline(FILE, line);
N = stoi(line);
Hitori = new int[N*N];
Hit_State = new int[N*N];
Hitori_Str = new string[N*N];
setInitialHitoriState(Hit_State, N);
readHitoriFromFile(&FILE, Hitori, Hitori_Str, N);
SetHitoriState( Hitori, Hit_State, N);
// Parte CPU
// Inicialización variables de tiempo
clock_t t1, t2;
double ms;
t1 = clock();
funcionCPU(Hitori_Str, Hitori, Hit_State, N);
t2 = clock();
ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
printf("Tiempo de CPU: %5f \n", ms);
//cout << "Tiempo CPU: " << ms << "[ms]" << endl;
funcionQL(Hitori_Str, Hit_State, N);
SetHitoriState( Hitori, Hit_State, N);
// Parte GPU 1
// Def tiempos GPU
int* HitoriDev, *Hit_StateDev;
cudaEvent_t ct1, ct2;
float dt;
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
int block_size = 256; // múltiplo de 32
int grid_size = (int)ceil((float)(N*N)/block_size); // ceil : función techo
cudaMalloc(&HitoriDev, sizeof(int)*N*N);
cudaMalloc(&Hit_StateDev, sizeof(int)*N*N);
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
cudaMemcpy(HitoriDev, Hitori, N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(Hit_StateDev, Hit_State, N*N*sizeof(int), cudaMemcpyHostToDevice);
kernelTripletF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
kernelTripletC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
kernelDobleF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
kernelDobleC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
for(int i = 0; i < N; i++){
kernelMuerteF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
kernelMuerteC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
kernelRescateF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
kernelRescateC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N);
}
cudaMemcpy(Hit_State, Hit_StateDev, N*N*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
cout << "Tiempo GPU 1: " << dt << "[ms]" << endl;
funcionQL(Hitori_Str, Hit_State, N);
SetHitoriState( Hitori, Hit_State, N);
// Parte GPU 2
int* Hit_StateDev2;
cudaMalloc(&Hit_StateDev2, sizeof(int)*N*N);
cudaEventRecord(ct1);
cudaMemcpyToSymbol(HitoriCM, Hitori, N*N*sizeof(int), 0, cudaMemcpyHostToDevice); // Para kernel CM
cudaMemcpy(Hit_StateDev2, Hit_State, N*N*sizeof(int), cudaMemcpyHostToDevice);
kernelTripletF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
kernelTripletC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
kernelDobleF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
kernelDobleC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
for(int i = 0; i < N; i++){
kernelMuerteF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
kernelMuerteC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
kernelRescateF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
kernelRescateC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N);
}
cudaMemcpy(Hit_State, Hit_StateDev2, N*N*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
cout << "Tiempo GPU 2: " << dt << "[ms]" << endl;
funcionQL(Hitori_Str, Hit_State, N);
// Liberar memoria
delete[] Hitori;
delete[] Hit_State;
delete[] Hitori_Str;
//P1
cudaFree(HitoriDev);
cudaFree(Hit_StateDev);
//P2
cudaFree(Hit_StateDev2);
}
FILE.close();
return 0;
} |
bef4e33032b2aedbf026e72dd04594c77f974914.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
void squential_spmv_csr(){
for(int row = 0;row < num_rows;row++){
float dot = 0;
int row_start = row_ptr[row];
int row_end = row_ptr[row+1];
for(int elem = row_start;elem < row_end;elem++){
dot += data[elem] * x[col_index[elem]];
}
y[row] += dot;
}
}
__global__ void SpMV_CSR(int num_rows, float *data,int *col_index, int *row_ptr, float *x, float *y){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < num_rows){
float dot = 0;
int row_start = row_ptr[row];
int row_end = row_ptr[row + 1];
for(int elem = row_start;elem< row_end;elem++){
dot += data[elem] * x[col_index[elem]];
}
y[row] += dot;
}
}
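/*
   Host-side launch sketch for SpMV_CSR (not shown in this file; the device
   buffers d_data, d_col_index, d_row_ptr, d_x and d_y are assumed to be already
   allocated and populated on the GPU).
*/
void launch_spmv_csr(int num_rows, float *d_data, int *d_col_index,
                     int *d_row_ptr, float *d_x, float *d_y) {
    int block = 256;                             // one thread per matrix row
    int grid = (num_rows + block - 1) / block;   // ceil(num_rows / block)
    hipLaunchKernelGGL(SpMV_CSR, dim3(grid), dim3(block), 0, 0,
                       num_rows, d_data, d_col_index, d_row_ptr, d_x, d_y);
    hipDeviceSynchronize();
}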
__global__ void SpMV_ELL(int num_rows, float* data, int* col_index, int num_elem, float* x, float* y){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < num_rows){
float dot = 0;
for(int i = 0;i<num_elem;i++){
dot += data[row + i * num_rows] * x[col_index[row + i * num_rows]];
}
y[row] += dot;
}
} | bef4e33032b2aedbf026e72dd04594c77f974914.cu | void squential_spmv_csr(){
for(int row = 0;row < num_rows;row++){
float dot = 0;
int row_start = row_ptr[row];
int row_end = row_ptr[row+1];
for(int elem = row_start;elem < row_end;elem++){
dot += data[elem] * x[col_index[elem]];
}
y[row] += dot;
}
}
__global__ void SpMV_CSR(int num_rows, float *data,int *col_index, int *row_ptr, float *x, float *y){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < num_rows){
float dot = 0;
int row_start = row_ptr[row];
int row_end = row_ptr[row + 1];
for(int elem = row_start;elem< row_end;elem++){
dot += data[elem] * x[col_index[elem]];
}
y[row] += dot;
}
}
__global__ void SpMV_ELL(int num_rows, float* data, int* col_index, int num_elem, float* x, float* y){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < num_rows){
float dot = 0;
for(int i = 0;i<num_elem;i++){
dot += data[row + i * num_rows] * x[col_index[row + i * num_rows]];
}
y[row] += dot;
}
} |
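/*
   Data layout assumed by SpMV_ELL above: every row is padded to num_elem entries
   and the padded matrix is stored column-major, so element i of row r sits at
   data[r + i * num_rows]. Illustrative 3-row example with num_elem = 2:

       row 0: values {3, 1}, columns {0, 2}
       row 1: values {2, 0}, columns {1, 0}   (second slot is zero padding)
       row 2: values {4, 5}, columns {0, 1}

       data      = [3, 2, 4, 1, 0, 5]
       col_index = [0, 1, 0, 2, 0, 1]

   Padded slots carry a zero value, so any valid column index works for them.
*/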
f4e1ac657d7204cb05f9d309ce45ad2d2fc22d96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ unsigned int count = 0;
__shared__ bool isLastBlockDone;
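/*
   calculatePartialSum and calculateTotalSum are used by the kernel below but are
   not defined in this file; the two __device__ functions here are one possible
   sketch (they assume blockDim.x is a power of two no larger than 256 and that
   each block covers one contiguous blockDim.x-sized chunk of the input).
*/
__device__ float calculatePartialSum(const float* array, unsigned int N)
{
    __shared__ float sdata[256];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < N) ? array[i] : 0.0f;   // load this block's chunk
    __syncthreads();
    // Standard shared-memory tree reduction within the block
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    return sdata[0];                          // block-wide sum (used by thread 0)
}
__device__ float calculateTotalSum(const float* result)
{
    // Every thread of the last block walks the gridDim.x partial sums; only
    // thread 0 actually stores the value, so the redundancy is harmless.
    float total = 0.0f;
    for (unsigned int i = 0; i < gridDim.x; ++i)
        total += result[i];
    return total;
}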
__global__ void sum(const float* array, unsigned int N,
float* result)
{
// Each block sums a subset of the input array
float partialSum = calculatePartialSum(array, N);
if (threadIdx.x == 0) {
// Thread 0 of each block stores the partial sum
// to global memory
result[blockIdx.x] = partialSum;
// Thread 0 makes sure its result is visible to
// all other threads
__threadfence();
// Thread 0 of each block signals that it is done
unsigned int value = atomicInc(&count, gridDim.x);
// Thread 0 of each block determines if its block is
// the last block to be done
isLastBlockDone = (value == (gridDim.x - 1));
}
// Synchronize to make sure that each thread reads
// the correct value of isLastBlockDone
__syncthreads();
if (isLastBlockDone) {
// The last block sums the partial sums
// stored in result[0 .. gridDim.x-1]
float totalSum = calculateTotalSum(result);
if (threadIdx.x == 0) {
// Thread 0 of last block stores total sum
// to global memory and resets count so that
// next kernel call works properly
result[0] = totalSum;
count = 0;
}
}
}
| f4e1ac657d7204cb05f9d309ce45ad2d2fc22d96.cu | __device__ unsigned int count = 0;
__shared__ bool isLastBlockDone;
__global__ void sum(const float* array, unsigned int N,
float* result)
{
// Each block sums a subset of the input array
float partialSum = calculatePartialSum(array, N);
if (threadIdx.x == 0) {
// Thread 0 of each block stores the partial sum
// to global memory
result[blockIdx.x] = partialSum;
// Thread 0 makes sure its result is visible to
// all other threads
__threadfence();
// Thread 0 of each block signals that it is done
unsigned int value = atomicInc(&count, gridDim.x);
// Thread 0 of each block determines if its block is
// the last block to be done
isLastBlockDone = (value == (gridDim.x - 1));
}
// Synchronize to make sure that each thread reads
// the correct value of isLastBlockDone
__syncthreads();
if (isLastBlockDone) {
// The last block sums the partial sums
// stored in result[0 .. gridDim.x-1]
float totalSum = calculateTotalSum(result);
if (threadIdx.x == 0) {
// Thread 0 of last block stores total sum
// to global memory and resets count so that
// next kernel call works properly
result[0] = totalSum;
count = 0;
}
}
}
|
5b465283650d2d71c4fd5ebd429c5382ce0e9641.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Matrix multiplication Exercise : P = M . N. using Block-based shared memory
*
* This program basically follows the tutorial in class.
*
* Given the 1024*1024 matrix test case, this program got the best performance
* boost using TILE_WIDTH = 16. This was also suggested in the slide set.
*
* This exercise was executed on a MacBook Pro, with GeForce GT 650M
* Using the CPU matrixMultiplication code, it takes about 18 seconds
* Using this Block-based approach, it only take about 0.13 ~0.15 seconds
*
* See also:
* Zhou Bin@ Nvidia & USTC, 2014, October, "CUDA Programming (2)" Lecture Slides
*
*
*/
#include "stdio.h"
#include "stdlib.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#define W 1024
#define TILE_WIDTH 16
#define DEBUG 1
void printMatrix(float *Matrix)
{
const int MAX_SIZE_PRINTED = 4;
printf("This is a %d by %d matrix.\n", W,W);
if (W > MAX_SIZE_PRINTED) {
printf("Actual displayed size is cut in 2 parts shown as");
printf(" %d by %d matrix.\n", MAX_SIZE_PRINTED, MAX_SIZE_PRINTED);
printf(" The Top_LEFT CORNER OF the %d * %d matrix:\n", W, W);
}
for(int i=0;i<W;i++)
{
for(int j=0;j<W;j++)
if(i < MAX_SIZE_PRINTED && j < MAX_SIZE_PRINTED){
if (DEBUG) printf("%5.2f ",*(Matrix+i*W+j));
}
if(i < MAX_SIZE_PRINTED && DEBUG) printf("\n");
}
if (W > MAX_SIZE_PRINTED){
printf(" The LOWER_RIGHT CORNER OF the %d * %d matrix\n", W, W);
for(int i=W-MAX_SIZE_PRINTED;i<W;i++)
{
for(int j=W-MAX_SIZE_PRINTED;j<W;j++)
if (DEBUG) printf("%5.2f ",*(Matrix+i*W+j));
if(DEBUG) printf("\n");
}
}
}
/*
* This code is mostly copied from the slide set with some comments written by Ben Koo.
*
* In this test case, W = 1024, TILE_WIDTH = 16, making the dimGrid = 64 * 64
* Within each block, there are 16 * 16 threads.
*
*
*/
//Matrix Multiplication Kernel
__global__ void matrixMulKernel_block(float* Md, float* Nd, float* Pd, int Width)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
//Only calculate values when Row and Col are smaller than Width
//Otherwise there might be some threads that are beyond the bounds of
//the desirable matrix size.
if(Row < Width && Col < Width){
for (int k = 0; k < Width; ++k)
Pvalue += Md[Row * Width + k] * Nd[k * Width + Col];
Pd[Row * Width + Col] = Pvalue;
}
}
//Matrix Multiplication Kernel
__global__ void matrixMulKernel_1(float* Md, float* Nd, float* Pd, int Width)
{
    const int BLOCK_SIZE = 16;   // must be defined before it is used to size the shared tiles
    __shared__ float Ms[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Ns[BLOCK_SIZE][BLOCK_SIZE];
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    //2D Thread ID
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // wA and wB are the full matrix widths; the sub-matrix index arithmetic below
    // (aBegin, aEnd, bStep) relies on that, so TILE_WIDTH here would address the wrong elements
    int wA = Width;
    int wB = Width;
// Index of the first sub-matrix of M processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of M processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of M
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of N processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of N
int bStep = BLOCK_SIZE * wB;
//Pvalue stores the Pd value computed by the thread
float Pvalue = 0;
// Loop over all the sub-matrices of M and N
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
Ms[ty][tx] = Md[a + wA * ty + tx];
Ns[ty][tx] = Nd[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Pvalue += Ms[ty][k] * Ns[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
Pd[c + wB * ty + tx] = Pvalue;
}
__global__ void matrixMulKernel_usingTile(float* Md, float* Nd, float* Pd, int Width)
{
//This declares the shared-memory tiles as TILE_WIDTH * TILE_WIDTH (16 * 16) float matrices
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
// When W = 1024, the block IDs (x * y) should be (64 * 64)
int bx = blockIdx.x; int by = blockIdx.y;
// When W = 1024, the thread IDs (x * y) should be (16 * 16)
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float PValue = 0;
// When W = 1024, m should go from 0 to 63
for (int m =0; m < Width/TILE_WIDTH; ++m){
// The following memory access takes place in shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH+ty)*Width];
//Make sure that all data are written in sync.
__syncthreads();
//Perform TILE level matrix multiplication and addition in synchrony.
for (int k = 0; k< TILE_WIDTH; ++k)
PValue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
//Take the individually calculated PValue and write it into Pd (the device memory array).
Pd[Row * Width + Col] = PValue;
}
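/*
   For comparison, a plain CPU multiplication of the same W x W matrices: a sketch
   of the kind of host code the "about 18 seconds" figure in the header refers to
   (the original CPU version is not included in this file).
*/
void matrixMulCPU(const float* M, const float* N, float* P, int Width)
{
    for (int row = 0; row < Width; ++row)
        for (int col = 0; col < Width; ++col) {
            float sum = 0.0f;
            for (int k = 0; k < Width; ++k)
                sum += M[row * Width + k] * N[k * Width + col];
            P[row * Width + col] = sum;   // O(W^3) work with no tiling or data reuse
        }
}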
int main()
{
int sNo = 0;
hipSetDevice(sNo%8);
int size = W*W*sizeof(float);
float *M,*N,*P;
float *d_M,*d_N,*d_P;
M = (float *) malloc(size);
N = (float *) malloc(size);
P = (float *) malloc(size);
hipMalloc((void **)&d_M,size);
hipMalloc((void **)&d_N,size);
hipMalloc((void **)&d_P,size);
//Populate initial values to the M, N and P matrices
for(int i=0;i<W*W;i++)
{
*(M+i) = i;
*(N+i) = i+1;
*(P+i) = 0;
}
hipMemcpy(d_M, M,size,hipMemcpyHostToDevice);
hipMemcpy(d_N, N,size,hipMemcpyHostToDevice);
//Starting from here, set up CUDA timing mechanism
float time_elapsed = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dim3 dimGrid(W /TILE_WIDTH, W / TILE_WIDTH);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
hipLaunchKernelGGL(( matrixMulKernel_1), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_M,d_N,d_P,W);
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
//The following function returns time_elapsed using milli-seconds as time units
hipEventElapsedTime(&time_elapsed, start, stop);
//Finished timing for CUDA execution
//To display time_elapsed into a number, divide it by 1000 first.
printf("\n\nGPU Elapsed Time:%f\n", time_elapsed/1000);
hipMemcpy(P,d_P,size,hipMemcpyDeviceToHost);
printMatrix(P);
free(M);free(N);free(P);
hipFree(d_M);hipFree(d_N);hipFree(d_P);
return 0;
}
| 5b465283650d2d71c4fd5ebd429c5382ce0e9641.cu | /**
* Matrix multiplication Exercise : P = M . N. using Block-based shared memory
*
* This program basically follows the tutorial in class.
*
* Given the 1024*1024 matrix test case, this program got the best performance
* boost using TILE_WIDTH = 16. This was also suggested in the slide set.
*
* This exercise was executed on a MacBook Pro, with GeForce GT 650M
* Using the CPU matrixMultiplication code, it takes about 18 seconds
* Using this Block-based approach, it only take about 0.13 ~0.15 seconds
*
* See also:
* Zhou Bin@ Nvidia & USTC, 2014, October, "CUDA Programming (2)" Lecture Slides
*
*
*/
#include "stdio.h"
#include "stdlib.h"
#include "cuda.h"
#include "cuda_runtime.h"
#define W 1024
#define TILE_WIDTH 16
#define DEBUG 1
void printMatrix(float *Matrix)
{
const int MAX_SIZE_PRINTED = 4;
printf("This is a %d by %d matrix.\n", W,W);
if (W > MAX_SIZE_PRINTED) {
printf("Actual displayed size is cut in 2 parts shown as");
printf(" %d by %d matrix.\n", MAX_SIZE_PRINTED, MAX_SIZE_PRINTED);
printf(" The Top_LEFT CORNER OF the %d * %d matrix:\n", W, W);
}
for(int i=0;i<W;i++)
{
for(int j=0;j<W;j++)
if(i < MAX_SIZE_PRINTED && j < MAX_SIZE_PRINTED){
if (DEBUG) printf("%5.2f ",*(Matrix+i*W+j));
}
if(i < MAX_SIZE_PRINTED && DEBUG) printf("\n");
}
if (W > MAX_SIZE_PRINTED){
printf(" The LOWER_RIGHT CORNER OF the %d * %d matrix\n", W, W);
for(int i=W-MAX_SIZE_PRINTED;i<W;i++)
{
for(int j=W-MAX_SIZE_PRINTED;j<W;j++)
if (DEBUG) printf("%5.2f ",*(Matrix+i*W+j));
if(DEBUG) printf("\n");
}
}
}
/*
* This code is mostly copied from the slide set with some comments written by Ben Koo.
*
* In this test case, W = 1024, TILE_WIDTH = 16, making the dimGrid = 64 * 64
* Within each block, there are 16 * 16 threads.
*
*
*/
//Matrix Multiplication Kernel
__global__ void matrixMulKernel_block(float* Md, float* Nd, float* Pd, int Width)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
//Only calculate values when Row and Col are smaller than Width
//Otherwise there might be some threads that are beyond the bounds of
//the desirable matrix size.
if(Row < Width && Col < Width){
for (int k = 0; k < Width; ++k)
Pvalue += Md[Row * Width + k] * Nd[k * Width + Col];
Pd[Row * Width + Col] = Pvalue;
}
}
//Matrix Multiplication Kernel
__global__ void matrixMulKernel_1(float* Md, float* Nd, float* Pd, int Width)
{
    const int BLOCK_SIZE = 16;   // must be defined before it is used to size the shared tiles
    __shared__ float Ms[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Ns[BLOCK_SIZE][BLOCK_SIZE];
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    //2D Thread ID
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // wA and wB are the full matrix widths; the sub-matrix index arithmetic below
    // (aBegin, aEnd, bStep) relies on that, so TILE_WIDTH here would address the wrong elements
    int wA = Width;
    int wB = Width;
// Index of the first sub-matrix of M processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of M processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of M
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of N processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of N
int bStep = BLOCK_SIZE * wB;
//Pvalue stores the Pd value computed by the thread
float Pvalue = 0;
// Loop over all the sub-matrices of M and N
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
Ms[ty][tx] = Md[a + wA * ty + tx];
Ns[ty][tx] = Nd[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Pvalue += Ms[ty][k] * Ns[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
Pd[c + wB * ty + tx] = Pvalue;
}
__global__ void matrixMulKernel_usingTile(float* Md, float* Nd, float* Pd, int Width)
{
//This declares the shared-memory tiles as TILE_WIDTH * TILE_WIDTH (16 * 16) float matrices
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
// When W = 1024, the block IDs (x * y) should be (64 * 64)
int bx = blockIdx.x; int by = blockIdx.y;
// When W = 1024, the thread IDs (x * y) should be (16 * 16)
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float PValue = 0;
// When W = 1024, m should go from 0 to 63
for (int m =0; m < Width/TILE_WIDTH; ++m){
// The following memory access takes place in shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH+ty)*Width];
//Make sure that all data are written in sync.
__syncthreads();
//Perform TILE level matrix multiplication and addition in synchrony.
for (int k = 0; k< TILE_WIDTH; ++k)
PValue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
//Take the individually calculated PValue and write it into Pd (the device memory array).
Pd[Row * Width + Col] = PValue;
}
int main()
{
int sNo = 0;
cudaSetDevice(sNo%8);
int size = W*W*sizeof(float);
float *M,*N,*P;
float *d_M,*d_N,*d_P;
M = (float *) malloc(size);
N = (float *) malloc(size);
P = (float *) malloc(size);
cudaMalloc((void **)&d_M,size);
cudaMalloc((void **)&d_N,size);
cudaMalloc((void **)&d_P,size);
//Populate initial values to the M, N and P matrices
for(int i=0;i<W*W;i++)
{
*(M+i) = i;
*(N+i) = i+1;
*(P+i) = 0;
}
cudaMemcpy(d_M, M,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_N, N,size,cudaMemcpyHostToDevice);
//Starting from here, set up CUDA timing mechanism
float time_elapsed = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dim3 dimGrid(W /TILE_WIDTH, W / TILE_WIDTH);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
matrixMulKernel_1<<< dimGrid, dimBlock >>>(d_M,d_N,d_P,W);
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
//The following function returns time_elapsed using milli-seconds as time units
cudaEventElapsedTime(&time_elapsed, start, stop);
//Finished timing for CUDA execution
//To display time_elapsed into a number, divide it by 1000 first.
printf("\n\nGPU Elapsed Time:%f\n", time_elapsed/1000);
cudaMemcpy(P,d_P,size,cudaMemcpyDeviceToHost);
printMatrix(P);
free(M);free(N);free(P);
cudaFree(d_M);cudaFree(d_N);cudaFree(d_P);
return 0;
}
|
1fde679808e9ffd7e0abb25987b0f75d42948852.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//git hello 0
//biyao test
#include<stdio.h>
//#include<stdlib.h>
//#include<cuda.h>
#include<cuda_runtime.h> // provides functions for time calculation
__global__ void helloFromGPU(void)
{
printf("Hello World from GPU!\n");
}
int main()
{
printf("Hello world from CPU!\n");
hipLaunchKernelGGL(( helloFromGPU) , dim3(2),dim3(10), 0, 0, );
hipDeviceReset();
return 0;
} | 1fde679808e9ffd7e0abb25987b0f75d42948852.cu | //git hello 0
//biyao test
#include<stdio.h>
//#include<stdlib.h>
//#include<cuda.h>
#include<cuda_runtime.h> // provides functions for time calculation
__global__ void helloFromGPU(void)
{
printf("Hello World from GPU!\n");
}
int main()
{
printf("Hello world from CPU!\n");
helloFromGPU <<<2,10>>>();
cudaDeviceReset();
return 0;
} |
63ca8db72e3216466ae765ecc04188f0c9a494d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
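  // Project-specific debug hook: in TEST phase, append the softmax output of
  // the 3rd channel (index 2; the loop below assumes a 3-channel softmax) for
  // every spatial location to ./data/test_data/score.txt.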
if(this->layer_param_.phase() == caffe::TEST) {
//LOG(INFO)<<"enter here";
string filename = "./data/test_data/score.txt";
std::ofstream file(filename.c_str(), std::ios::app);
const Dtype* out_data = top[0]->cpu_data();
//fetch the 3rd channel
for(int i = 2; i < count; i += 3)
file << out_data[i] << std::endl;
file.close();
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| 63ca8db72e3216466ae765ecc04188f0c9a494d6.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
if(this->layer_param_.phase() == caffe::TEST) {
//LOG(INFO)<<"enter here";
string filename = "./data/test_data/score.txt";
std::ofstream file(filename.c_str(), std::ios::app);
const Dtype* out_data = top[0]->cpu_data();
//fetch the 3rd channel
for(int i = 2; i < count; i += 3)
file << out_data[i] << std::endl;
file.close();
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
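  // Softmax gradient: for y = softmax(z),
  // dE/dz_c = y_c * (dE/dy_c - sum_k dE/dy_k * y_k).
  // kernel_channel_dot forms the per-(n, s) sum_k dE/dy_k * y_k,
  // kernel_channel_subtract removes it from every channel, and the final
  // caffe_gpu_mul multiplies by y.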
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
b09618f45f949392f25be38543ab2284acc4389d.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) 2015 by Contributors
* \file convolution.cu
* \brief
* \author Bing Xu
*/
#include "./convolution-inl.h"
#include <vector>
#if MXNET_USE_CUDNN == 1
#include "./cudnn_convolution-inl.h"
#endif // MXNET_USE_CUDNN
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ConvolutionParam param, int dtype,
std::vector<TShape> *in_shape,
std::vector<TShape> *out_shape,
Context ctx) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1
if (param.dilate[0] == 1 && param.dilate[1] == 1) {
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNConvolutionOp<DType>(param, in_shape, out_shape, ctx);
})
} else {
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ConvolutionOp<gpu, DType>(param);
})
}
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ConvolutionOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN
return op;
}
template<>
void mx_xpu_asum<mshadow::gpu,float>(mshadow::Stream<mshadow::gpu>* s,const int n, const float* x, float* y)
{
hipblasSasum(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s),n,x,1,y);
}
template<>
void mx_xpu_asum<mshadow::gpu,double>(mshadow::Stream<mshadow::gpu>* s,const int n, const double* x, double* y)
{
hipblasDasum(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s),n,x,1,y);
}
template<>
void mx_xpu_asum<mshadow::cpu,float>(mshadow::Stream<mshadow::cpu>* s,const int n, const float* x, float* y)
{
*y=cblas_sasum(n,x,1);
}
template<>
void mx_xpu_asum<mshadow::cpu,double>(mshadow::Stream<mshadow::cpu>* s,const int n, const double* x, double* y)
{
*y=cblas_dasum(n,x,1);
}
template<>
void mx_xpu_asum<mshadow::cpu,mshadow::half::half_t>(mshadow::Stream<mshadow::cpu>* s,const int n, const mshadow::half::half_t* x, mshadow::half::half_t* y){}
template<>
void mx_xpu_asum<mshadow::gpu,mshadow::half::half_t>(mshadow::Stream<mshadow::gpu>* s,const int n, const mshadow::half::half_t* x, mshadow::half::half_t* y){}
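// The half_t specializations here (and for mx_xpu_scal below) are left empty:
// no half-precision asum/scal routine is wired up through cblas/hipblas.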
template <>
void mx_xpu_scal<gpu,double>(mshadow::Stream<mshadow::gpu>* s,const int N, const double alpha, double *X) {
hipblasDscal(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s), N, &alpha, X, 1);
}
template <>
void mx_xpu_scal<gpu,float>(mshadow::Stream<mshadow::gpu>* s,const int N, const float alpha, float *X) {
hipblasSscal(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s), N, &alpha, X, 1);
}
template <>
void mx_xpu_scal<cpu,double>(mshadow::Stream<mshadow::cpu>* s,const int N, const double alpha, double *X) {
cblas_dscal(N, alpha, X, 1);
}
template <>
void mx_xpu_scal<cpu,float>(mshadow::Stream<mshadow::cpu>* s,const int N, const float alpha, float *X) {
cblas_sscal(N, alpha, X, 1);
}
template<>
void mx_xpu_scal<mshadow::cpu,mshadow::half::half_t>(mshadow::Stream<mshadow::cpu>* s,const int N, const mshadow::half::half_t alpha, mshadow::half::half_t* X){}
template<>
void mx_xpu_scal<mshadow::gpu,mshadow::half::half_t>(mshadow::Stream<mshadow::gpu>* s,const int N, const mshadow::half::half_t alpha, mshadow::half::half_t* X){}
} // namespace op
} // namespace mxnet
| b09618f45f949392f25be38543ab2284acc4389d.cu | /*!
* Copyright (c) 2015 by Contributors
* \file convolution.cu
* \brief
* \author Bing Xu
*/
#include "./convolution-inl.h"
#include <vector>
#if MXNET_USE_CUDNN == 1
#include "./cudnn_convolution-inl.h"
#endif // MXNET_USE_CUDNN
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ConvolutionParam param, int dtype,
std::vector<TShape> *in_shape,
std::vector<TShape> *out_shape,
Context ctx) {
Operator *op = NULL;
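  // Prefer the cuDNN engine when it is compiled in and the convolution is not
  // dilated; dilated convolutions fall back to the native ConvolutionOp.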
#if MXNET_USE_CUDNN == 1
if (param.dilate[0] == 1 && param.dilate[1] == 1) {
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNConvolutionOp<DType>(param, in_shape, out_shape, ctx);
})
} else {
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ConvolutionOp<gpu, DType>(param);
})
}
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ConvolutionOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN
return op;
}
template<>
void mx_xpu_asum<mshadow::gpu,float>(mshadow::Stream<mshadow::gpu>* s,const int n, const float* x, float* y)
{
cublasSasum(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s),n,x,1,y);
}
template<>
void mx_xpu_asum<mshadow::gpu,double>(mshadow::Stream<mshadow::gpu>* s,const int n, const double* x, double* y)
{
cublasDasum(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s),n,x,1,y);
}
template<>
void mx_xpu_asum<mshadow::cpu,float>(mshadow::Stream<mshadow::cpu>* s,const int n, const float* x, float* y)
{
*y=cblas_sasum(n,x,1);
}
template<>
void mx_xpu_asum<mshadow::cpu,double>(mshadow::Stream<mshadow::cpu>* s,const int n, const double* x, double* y)
{
*y=cblas_dasum(n,x,1);
}
template<>
void mx_xpu_asum<mshadow::cpu,mshadow::half::half_t>(mshadow::Stream<mshadow::cpu>* s,const int n, const mshadow::half::half_t* x, mshadow::half::half_t* y){}
template<>
void mx_xpu_asum<mshadow::gpu,mshadow::half::half_t>(mshadow::Stream<mshadow::gpu>* s,const int n, const mshadow::half::half_t* x, mshadow::half::half_t* y){}
template <>
void mx_xpu_scal<gpu,double>(mshadow::Stream<mshadow::gpu>* s,const int N, const double alpha, double *X) {
cublasDscal(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s), N, &alpha, X, 1);
}
template <>
void mx_xpu_scal<gpu,float>(mshadow::Stream<mshadow::gpu>* s,const int N, const float alpha, float *X) {
cublasSscal(mshadow::Stream<mshadow::gpu>::GetBlasHandle(s), N, &alpha, X, 1);
}
template <>
void mx_xpu_scal<cpu,double>(mshadow::Stream<mshadow::cpu>* s,const int N, const double alpha, double *X) {
cblas_dscal(N, alpha, X, 1);
}
template <>
void mx_xpu_scal<cpu,float>(mshadow::Stream<mshadow::cpu>* s,const int N, const float alpha, float *X) {
cblas_sscal(N, alpha, X, 1);
}
template<>
void mx_xpu_scal<mshadow::cpu,mshadow::half::half_t>(mshadow::Stream<mshadow::cpu>* s,const int N, const mshadow::half::half_t alpha, mshadow::half::half_t* X){}
template<>
void mx_xpu_scal<mshadow::gpu,mshadow::half::half_t>(mshadow::Stream<mshadow::gpu>* s,const int N, const mshadow::half::half_t alpha, mshadow::half::half_t* X){}
} // namespace op
} // namespace mxnet
|
f5cf3cd73831794c6633b34c05600de4c964175c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//#define DEBUGPRINT 0
__global__ void compute_grid_h_kernel( double *gridh, double *xm1, double *ym1, double *zm1, int nelt, int lx1, int ly1, int lz1, int if3d, int nnel,int lxy, int lxyz ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nnel){
int ix= id % lx1;
int iy= (id/lx1)%ly1;
int iz = (id / (lxy))%lz1;
int e = id / (lxyz);
int km1,kp1,izm,izp;
double x1,x2,x3,x4,x5,x6,y1,y2,y3,y4,y5,y6,z1,z2,z3,z4,z5,z6;
double a1,a2,a3,b1,b2,b3,c1,c2,c3,d1,d2,d3;
if (if3d) {
km1=iz-1;
kp1=iz+1;
izm=km1;
if (km1 < 0){ izm=iz;}
izp=kp1;
if (kp1 > lz1-1) {izp=iz;}
}
else {
izm=iz;
izp=iz;
}
int jm1=iy-1;
int jp1=iy+1;
int iym=jm1;
if (jm1 < 0) {iym=iy;} // <0 instead of <1 because C arrays start at 0
int iyp=jp1;
if (jp1 > ly1-1) {iyp=iy;}
int im1=ix-1;
int ip1=ix+1;
int ixm=im1;
if (im1 < 0){ ixm=ix;}
int ixp=ip1;
if (ip1 > lx1-1) {ixp=ix;}
// if(id<12){printf("id is %d ix %d iy %d iz %d e %d \n",id, ix,iy,iz,e);}
// if(id==0){ for(int kk=0; kk<100;kk++){printf("id %d xm1 %lf ym1 %lf zm1 %lf \n",kk,xm1[kk],ym1[kk],zm1[kk]);}}
//if(id<10){printf("whole thing %d \n",e*lxyz+iz*lxy+iy*lx1+ixm);}
x1 = xm1[e*lxyz+iz*lxy+iy*lx1+ixm];// possible optimizations for array reads . adeesha
x2 = xm1[e*lxyz+iz*lxy+iy*lx1+ixp];
x3 = xm1[e*lxyz+iz*lxy+iym*lx1+ix];
x4 = xm1[e*lxyz+iz*lxy+iyp*lx1+ix];
x5 = xm1[e*lxyz+izm*lxy+iy*lx1+ix];
x6 = xm1[e*lxyz+izp*lxy+iy*lx1+ix];
y1 = ym1[e*lxyz+iz*lxy+iy*lx1+ixm];
y2 = ym1[e*lxyz+iz*lxy+iy*lx1+ixp];
y3 = ym1[e*lxyz+iz*lxy+iym*lx1+ix];
y4 = ym1[e*lxyz+iz*lxy+iyp*lx1+ix];
y5 = ym1[e*lxyz+izm*lxy+iy*lx1+ix];
y6 = ym1[e*lxyz+izp*lxy+iy*lx1+ix];
z1 = zm1[e*lxyz+iz*lxy+iy*lx1+ixm];
z2 = zm1[e*lxyz+iz*lxy+iy*lx1+ixp];
z3 = zm1[e*lxyz+iz*lxy+iym*lx1+ix];
z4 = zm1[e*lxyz+iz*lxy+iyp*lx1+ix];
z5 = zm1[e*lxyz+izm*lxy+iy*lx1+ix];
z6 = zm1[e*lxyz+izp*lxy+iy*lx1+ix];
a1=x2-x1;
a2=y2-y1;
a3=z2-z1;
b1=x4-x3;
b2=y4-y3;
b3=z4-z3;
c1=x6-x5;
c2=y6-y5;
c3=z6-z5;
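// a, b and c are coordinate differences between neighbouring grid points in
// the three local index directions. In 3D the scalar triple product
// (a x b) . c below gives the volume of the cell they span and its cube root
// a local grid spacing h; in 2D the 2x2 determinant a1*b2 - a2*b1 plays the
// same role (an area, so a square root is taken).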
// if(id<12){printf("a1 %lf a2 %lf a3 %lf b1 %lf b2 %lf b3 %lf x1 %lf x2 %lf y1 %lf y2 %lf z1 %lf z2 %lf \n",a1,a2,a3,b1,b2,b3,x1,x2,y1,y2,z1,z2);}
double fact;
if (if3d) {
fact=0.125; // h doesn't reach into corners of neighboring elements
if (ixp==ix || ixm==ix){ fact=2.0*fact;}
if (iym==iy || iyp==iy) {fact=2.0*fact;}
if (izm==iz||izp==iz) {fact=2.0*fact;}
//call cross(d,a,b);
// cartesian vector cross product in gpu
d1 = a2*b3 - a3*b2;
d2 = a3*b1 - a1*b3;
d3 = a1*b2 - a2*b1;
// vector dot product to get sum
double sum = c1*d1+c2*d2+c3*d3;
// if(id<12){printf("c1 %lf c2 %lf c3 %lf d1 %lf d2 %lf d3 %lf sum is :%lf fact: %lf \n",c1,c2,c3,d1,d2,d3,sum,fact);}
gridh[e*lxyz+iz*lxy+iy*lx1+ix]=fact*sum;
gridh[e*lxyz+iz*lxy+iy*lx1+ix]=pow(fabs(gridh[e*lxyz+iz*lxy+iy*lx1+ix]),(1.0/3.0));
}
else{
fact=0.25;
if (ixp==ix||ixm==ix) fact=2.0*fact;
if (iym==iy||iyp==iy) fact=2.0*fact;
gridh[e*lxyz+iz*lxy+iy*lx1+ix]=sqrtf(fact*fabs(a1*b2-a2*b1));
}
}
}
extern "C" void compute_grid_h_gpu_wrapper_(int *glbblockSize1,double *d_gridh, double *d_xm1, double *d_ym1, double *d_zm1, int *nelt, int *lx1, int *ly1, int *lz1, int *if3d){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
printf("CUDA: Start compute_grid_h_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_grid_h_gpu_wrapper values glbblockSize1=%d, nelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d\n",glbblockSize1[0],nelt[0],lx1[0],ly1[0],lz1[0],if3d[0]);
#endif
int blockSize = glbblockSize1[0], gridSize;
int lxy=lx1[0]*ly1[0];
int lxyz=lxy*lz1[0];
int nnel=nelt[0]*lxyz;
gridSize = (int)ceil((float)nnel/blockSize);
#ifdef DEBUGPRINT
printf("CUDA: compute_grid_h_gpu_wrapper grid size = %d, block size = %d \n",gridSize,blockSize);
#endif
hipLaunchKernelGGL(( compute_grid_h_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_gridh, d_xm1, d_ym1, d_zm1, nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],nnel,lxy,lxyz);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
printf("CUDA: End compute_grid_h_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
#endif
/*printf(" $$$ compute_grid_h_gpu_wrapper check start ");
for(int b=0;b<10;b++){
printf("d_gridh[%d] = %lf \n",b,d_gridh[b]);
}
printf(" $$$ compute_grid_h_gpu_wrapper check End ");*/
}
__global__ void compute_mesh_h_kernel( double *meshh, double *xm1, double *ym1, double *zm1, int nelt, int lx1, int ly1, int lz1, int if3d, double rp , int ncrn, int lxy, int lxyz){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt){
int ic1,ic2;
//int ic2= id % ncrn;
//int ic1= (id/ncrn)%ncrn;
int e = id;
int km1,kp1,izm,izp;
double xcrn[8],ycrn[8],zcrn[8];
int k1=1;
int k2= lz1;
int j1=1;
int j2= ly1;
int i1=1;
int i2= lx1;
xcrn[0] = xm1[e*lxyz];
xcrn[1] = xm1[e*lxyz+lx1-1];
xcrn[2] = xm1[e*lxyz+(ly1-1)*lx1];
xcrn[3] = xm1[e*lxyz+(ly1-1)*lx1+lx1-1];
ycrn[0] = ym1[e*lxyz];
ycrn[1] = ym1[e*lxyz+lx1-1];
ycrn[2] = ym1[e*lxyz+(ly1-1)*lx1];
ycrn[3]= ym1[e*lxyz+(ly1-1)*lx1+lx1-1];
if (if3d) {
xcrn[4] = xm1[e*lxyz+(lz1-1)*lxy];
xcrn[5] = xm1[e*lxyz+(lz1-1)*lxy+lx1-1];
xcrn[6] = xm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
xcrn[7] = xm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
ycrn[4] = ym1[e*lxyz+(lz1-1)*lxy];
ycrn[5] = ym1[e*lxyz+(lz1-1)*lxy+lx1-1];
ycrn[6] = ym1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
ycrn[7] = ym1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
zcrn[0] = zm1[e*lxyz];
zcrn[1] = zm1[e*lxyz+lx1-1];
zcrn[2] = zm1[e*lxyz+(ly1-1)*lx1];
zcrn[3] = zm1[e*lxyz+(ly1-1)*lx1+lx1-1];
zcrn[4] = zm1[e*lxyz+(lz1-1)*lxy];
zcrn[5] = zm1[e*lxyz+(lz1-1)*lxy+lx1-1];
zcrn[6] = zm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
zcrn[7] = zm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
}
double dist = 10e+36;
for (ic1 = 0 ; ic1<ncrn;ic1++){
for (ic2 = 0; ic2<ncrn; ic2++){
if(ic2 !=ic1){
double txcrn = xcrn[ic2]-xcrn[ic1];
double tycrn = ycrn[ic2]-ycrn[ic1];
double tzcrn = zcrn[ic2]-zcrn[ic1];
double dtmp = txcrn*txcrn+tycrn*tycrn+tzcrn*tzcrn;
double sqrtdtmp = sqrtf(dtmp) ;
if(sqrtdtmp<dist){
dist =sqrtdtmp;
}
}
}
}
/*if(id==0){
for(int aa=0;aa<2;aa++){
printf("$$$ print from cuda xm1 = %lf, ym1 =%lf zm1 = %lf \n",xm1[aa],ym1[aa],zm1[aa]);
printf("$$$ print from cuda xcrn = %lf, ycrn =%lf zcrn = %lf \n",xcrn[aa],ycrn[aa],zcrn[aa]);
}
printf("$$$ print from cuda dist = %lf, rp=%lf \n",dist,rp );
}*/
meshh[e]= dist*rp;
}
}
extern "C" void compute_mesh_h_gpu_wrapper_(int *glbblockSize2,double *d_meshh, double *d_xm1, double *d_ym1, double *d_zm1, int *nelt, int *lx1, int *ly1, int *lz1, int *if3d, double *rp, int *ncrn){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
printf("CUDA: Start compute_mesh_h_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_mesh_h_gpu_wrapper values nelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d rp=%lf,ncrn=%d \n",nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],rp[0],ncrn[0]);
#endif
int blockSize = glbblockSize2[0], gridSize;
gridSize = (int)ceil((float)nelt[0]/blockSize);
int lxy=lx1[0]*ly1[0];
int lxyz=lz1[0]*lxy;
hipLaunchKernelGGL(( compute_mesh_h_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_meshh, d_xm1, d_ym1, d_zm1, nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],rp[0],ncrn[0],lxy,lxyz);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
printf("CUDA: End compute_mesh_h_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
#endif
/*printf(" $$$ compute_mesh_h_gpu_wrapper check start ");
for(int b=0;b<10;b++){
printf("d_meshh[%d] = %lf \n",b,d_meshh[b]);
}
printf(" $$$ compute_mesh_h_gpu_wrapper check End ")*/
}
| f5cf3cd73831794c6633b34c05600de4c964175c.cu | #include <stdio.h>
//#define DEBUGPRINT 0
__global__ void compute_grid_h_kernel( double *gridh, double *xm1, double *ym1, double *zm1, int nelt, int lx1, int ly1, int lz1, int if3d, int nnel,int lxy, int lxyz ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nnel){
int ix= id % lx1;
int iy= (id/lx1)%ly1;
int iz = (id / (lxy))%lz1;
int e = id / (lxyz);
int km1,kp1,izm,izp;
double x1,x2,x3,x4,x5,x6,y1,y2,y3,y4,y5,y6,z1,z2,z3,z4,z5,z6;
double a1,a2,a3,b1,b2,b3,c1,c2,c3,d1,d2,d3;
if (if3d) {
km1=iz-1;
kp1=iz+1;
izm=km1;
if (km1 < 0){ izm=iz;}
izp=kp1;
if (kp1 > lz1-1) {izp=iz;}
}
else {
izm=iz;
izp=iz;
}
int jm1=iy-1;
int jp1=iy+1;
int iym=jm1;
if (jm1 < 0) {iym=iy;} // <0 instead of <1 because C arrays start at 0
int iyp=jp1;
if (jp1 > ly1-1) {iyp=iy;}
int im1=ix-1;
int ip1=ix+1;
int ixm=im1;
if (im1 < 0){ ixm=ix;}
int ixp=ip1;
if (ip1 > lx1-1) {ixp=ix;}
// if(id<12){printf("id is %d ix %d iy %d iz %d e %d \n",id, ix,iy,iz,e);}
// if(id==0){ for(int kk=0; kk<100;kk++){printf("id %d xm1 %lf ym1 %lf zm1 %lf \n",kk,xm1[kk],ym1[kk],zm1[kk]);}}
//if(id<10){printf("whole thing %d \n",e*lxyz+iz*lxy+iy*lx1+ixm);}
x1 = xm1[e*lxyz+iz*lxy+iy*lx1+ixm];// possible optimizations for array reads . adeesha
x2 = xm1[e*lxyz+iz*lxy+iy*lx1+ixp];
x3 = xm1[e*lxyz+iz*lxy+iym*lx1+ix];
x4 = xm1[e*lxyz+iz*lxy+iyp*lx1+ix];
x5 = xm1[e*lxyz+izm*lxy+iy*lx1+ix];
x6 = xm1[e*lxyz+izp*lxy+iy*lx1+ix];
y1 = ym1[e*lxyz+iz*lxy+iy*lx1+ixm];
y2 = ym1[e*lxyz+iz*lxy+iy*lx1+ixp];
y3 = ym1[e*lxyz+iz*lxy+iym*lx1+ix];
y4 = ym1[e*lxyz+iz*lxy+iyp*lx1+ix];
y5 = ym1[e*lxyz+izm*lxy+iy*lx1+ix];
y6 = ym1[e*lxyz+izp*lxy+iy*lx1+ix];
z1 = zm1[e*lxyz+iz*lxy+iy*lx1+ixm];
z2 = zm1[e*lxyz+iz*lxy+iy*lx1+ixp];
z3 = zm1[e*lxyz+iz*lxy+iym*lx1+ix];
z4 = zm1[e*lxyz+iz*lxy+iyp*lx1+ix];
z5 = zm1[e*lxyz+izm*lxy+iy*lx1+ix];
z6 = zm1[e*lxyz+izp*lxy+iy*lx1+ix];
a1=x2-x1;
a2=y2-y1;
a3=z2-z1;
b1=x4-x3;
b2=y4-y3;
b3=z4-z3;
c1=x6-x5;
c2=y6-y5;
c3=z6-z5;
// if(id<12){printf("a1 %lf a2 %lf a3 %lf b1 %lf b2 %lf b3 %lf x1 %lf x2 %lf y1 %lf y2 %lf z1 %lf z2 %lf \n",a1,a2,a3,b1,b2,b3,x1,x2,y1,y2,z1,z2);}
double fact;
if (if3d) {
fact=0.125; // h doesn't reach into corners of neighboring elements
if (ixp==ix || ixm==ix){ fact=2.0*fact;}
if (iym==iy || iyp==iy) {fact=2.0*fact;}
if (izm==iz||izp==iz) {fact=2.0*fact;}
//call cross(d,a,b);
// cartesian vector cross product in gpu
d1 = a2*b3 - a3*b2;
d2 = a3*b1 - a1*b3;
d3 = a1*b2 - a2*b1;
// vector dot product to get sum
double sum = c1*d1+c2*d2+c3*d3;
// if(id<12){printf("c1 %lf c2 %lf c3 %lf d1 %lf d2 %lf d3 %lf sum is :%lf fact: %lf \n",c1,c2,c3,d1,d2,d3,sum,fact);}
gridh[e*lxyz+iz*lxy+iy*lx1+ix]=fact*sum;
gridh[e*lxyz+iz*lxy+iy*lx1+ix]=pow(fabs(gridh[e*lxyz+iz*lxy+iy*lx1+ix]),(1.0/3.0));
}
else{
fact=0.25;
if (ixp==ix||ixm==ix) fact=2.0*fact;
if (iym==iy||iyp==iy) fact=2.0*fact;
gridh[e*lxyz+iz*lxy+iy*lx1+ix]=sqrtf(fact*fabs(a1*b2-a2*b1));
}
}
}
extern "C" void compute_grid_h_gpu_wrapper_(int *glbblockSize1,double *d_gridh, double *d_xm1, double *d_ym1, double *d_zm1, int *nelt, int *lx1, int *ly1, int *lz1, int *if3d){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
printf("CUDA: Start compute_grid_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_grid_h_gpu_wrapper values glbblockSize1=%d, nelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d\n",glbblockSize1[0],nelt[0],lx1[0],ly1[0],lz1[0],if3d[0]);
#endif
int blockSize = glbblockSize1[0], gridSize;
int lxy=lx1[0]*ly1[0];
int lxyz=lxy*lz1[0];
int nnel=nelt[0]*lxyz;
gridSize = (int)ceil((float)nnel/blockSize);
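// One thread per grid point: nnel = nelt*lx1*ly1*lz1 points in total.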
#ifdef DEBUGPRINT
printf("CUDA: compute_grid_h_gpu_wrapper grid size = %d, block size = %d \n",gridSize,blockSize);
#endif
compute_grid_h_kernel<<<gridSize, blockSize>>>(d_gridh, d_xm1, d_ym1, d_zm1, nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],nnel,lxy,lxyz);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
printf("CUDA: End compute_grid_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
/*printf(" $$$ compute_grid_h_gpu_wrapper check start ");
for(int b=0;b<10;b++){
printf("d_gridh[%d] = %lf \n",b,d_gridh[b]);
}
printf(" $$$ compute_grid_h_gpu_wrapper check End ");*/
}
__global__ void compute_mesh_h_kernel( double *meshh, double *xm1, double *ym1, double *zm1, int nelt, int lx1, int ly1, int lz1, int if3d, double rp , int ncrn, int lxy, int lxyz){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt){
int ic1,ic2;
//int ic2= id % ncrn;
//int ic1= (id/ncrn)%ncrn;
int e = id;
int km1,kp1,izm,izp;
double xcrn[8],ycrn[8],zcrn[8];
int k1=1;
int k2= lz1;
int j1=1;
int j2= ly1;
int i1=1;
int i2= lx1;
xcrn[0] = xm1[e*lxyz];
xcrn[1] = xm1[e*lxyz+lx1-1];
xcrn[2] = xm1[e*lxyz+(ly1-1)*lx1];
xcrn[3] = xm1[e*lxyz+(ly1-1)*lx1+lx1-1];
ycrn[0] = ym1[e*lxyz];
ycrn[1] = ym1[e*lxyz+lx1-1];
ycrn[2] = ym1[e*lxyz+(ly1-1)*lx1];
ycrn[3]= ym1[e*lxyz+(ly1-1)*lx1+lx1-1];
if (if3d) {
xcrn[4] = xm1[e*lxyz+(lz1-1)*lxy];
xcrn[5] = xm1[e*lxyz+(lz1-1)*lxy+lx1-1];
xcrn[6] = xm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
xcrn[7] = xm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
ycrn[4] = ym1[e*lxyz+(lz1-1)*lxy];
ycrn[5] = ym1[e*lxyz+(lz1-1)*lxy+lx1-1];
ycrn[6] = ym1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
ycrn[7] = ym1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
zcrn[0] = zm1[e*lxyz];
zcrn[1] = zm1[e*lxyz+lx1-1];
zcrn[2] = zm1[e*lxyz+(ly1-1)*lx1];
zcrn[3] = zm1[e*lxyz+(ly1-1)*lx1+lx1-1];
zcrn[4] = zm1[e*lxyz+(lz1-1)*lxy];
zcrn[5] = zm1[e*lxyz+(lz1-1)*lxy+lx1-1];
zcrn[6] = zm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
zcrn[7] = zm1[e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+lx1-1];
}
double dist = 10e+36;
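// Brute-force search for the smallest distance between any two distinct
// corners of the element; meshh is that distance scaled by rp and serves as
// a per-element length scale.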
for (ic1 = 0 ; ic1<ncrn;ic1++){
for (ic2 = 0; ic2<ncrn; ic2++){
if(ic2 !=ic1){
double txcrn = xcrn[ic2]-xcrn[ic1];
double tycrn = ycrn[ic2]-ycrn[ic1];
double tzcrn = zcrn[ic2]-zcrn[ic1];
double dtmp = txcrn*txcrn+tycrn*tycrn+tzcrn*tzcrn;
double sqrtdtmp = sqrtf(dtmp) ;
if(sqrtdtmp<dist){
dist =sqrtdtmp;
}
}
}
}
/*if(id==0){
for(int aa=0;aa<2;aa++){
printf("$$$ print from cuda xm1 = %lf, ym1 =%lf zm1 = %lf \n",xm1[aa],ym1[aa],zm1[aa]);
printf("$$$ print from cuda xcrn = %lf, ycrn =%lf zcrn = %lf \n",xcrn[aa],ycrn[aa],zcrn[aa]);
}
printf("$$$ print from cuda dist = %lf, rp=%lf \n",dist,rp );
}*/
meshh[e]= dist*rp;
}
}
extern "C" void compute_mesh_h_gpu_wrapper_(int *glbblockSize2,double *d_meshh, double *d_xm1, double *d_ym1, double *d_zm1, int *nelt, int *lx1, int *ly1, int *lz1, int *if3d, double *rp, int *ncrn){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
printf("CUDA: Start compute_mesh_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_mesh_h_gpu_wrapper values nelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d rp=%lf,ncrn=%d \n",nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],rp[0],ncrn[0]);
#endif
int blockSize = glbblockSize2[0], gridSize;
gridSize = (int)ceil((float)nelt[0]/blockSize);
int lxy=lx1[0]*ly1[0];
int lxyz=lz1[0]*lxy;
compute_mesh_h_kernel<<<gridSize, blockSize>>>(d_meshh, d_xm1, d_ym1, d_zm1, nelt[0],lx1[0],ly1[0],lz1[0],if3d[0],rp[0],ncrn[0],lxy,lxyz);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
printf("CUDA: End compute_mesh_h_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
/*printf(" $$$ compute_mesh_h_gpu_wrapper check start ");
for(int b=0;b<10;b++){
printf("d_meshh[%d] = %lf \n",b,d_meshh[b]);
}
printf(" $$$ compute_mesh_h_gpu_wrapper check End ")*/
}
|
f44b76b498b25f14651e784c0d5120539c341b65.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
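    // Peak bandwidth = 2 transfers per clock (DDR) * memory clock (kHz)
    // * bus width in bytes; the 1e6 factor converts the result to GB/s.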
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
| f44b76b498b25f14651e784c0d5120539c341b65.cu | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
|
4293e6971af95181c6c953e2107e4c06dd7b57f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments/
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include <stdio.h>
// Number of threads in the CUDA block.
__device__ static unsigned getNumThreads() {
return blockDim.x;
}
// Thread ID in the CUDA block
__device__ static unsigned getThreadId() {
return threadIdx.x;
}
// Warp ID in the CUDA block
__device__ static unsigned getWarpId() {
return threadIdx.x >> DS_Max_Worker_Warp_Size_Log2;
}
//// Team ID in the CUDA grid
//__device__ static unsigned getTeamId() {
// return blockIdx.x;
//}
// The CUDA thread ID of the master thread.
__device__ static unsigned getMasterThreadId() {
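  // The master is the first thread of the warp that contains the last thread
  // of the block: round (numThreads - 1) down to a multiple of the warp size.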
unsigned Mask = DS_Max_Worker_Warp_Size - 1;
return (getNumThreads() - 1) & (~Mask);
}
// The lowest ID among the active threads in the warp.
__device__ static unsigned getWarpMasterActiveThreadId() {
unsigned long long Mask = __ballot(true);
unsigned long long ShNum = 32 - (getThreadId() & DS_Max_Worker_Warp_Size_Log2_Mask);
unsigned long long Sh = Mask << ShNum;
return __popc(Sh);
}
// Return true if this is the master thread.
__device__ static bool IsMasterThread() {
return getMasterThreadId() == getThreadId();
}
// Return true if this is the first thread in the warp.
//static bool IsWarpMasterThread() {
// return (getThreadId() & DS_Max_Worker_Warp_Size_Log2_Mask) == 0u;
//}
// Return true if this is the first active thread in the warp.
__device__ static bool IsWarpMasterActiveThread() {
return getWarpMasterActiveThreadId() == 0u;
}
/// Return the provided size aligned to the size of a pointer.
__device__ static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void*);
if (Val & (Align-1)) {
Val += Align;
Val &= ~(Align-1);
}
return Val;
}
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the master thread and warp masters.
// \param RootS: A pointer to the root of the data sharing stack.
// \param InitialDataSize: The initial size of the data in the slot.
EXTERN void __kmpc_initialize_data_sharing_environment(
__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize){
DSPRINT0(DSFLAG_INIT,"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT,"Warp ID: %d\n", WID);
omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void*)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT,"Initial data size: %08x \n", InitialDataSize);
DSPRINT(DSFLAG_INIT,"Root slot at: %016llx \n", (long long)RootS);
DSPRINT(DSFLAG_INIT,"Root slot data-end at: %016llx \n", (long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT,"Root slot next at: %016llx \n", (long long)RootS->Next);
DSPRINT(DSFLAG_INIT,"Shared slot ptr at: %016llx \n", (long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT,"Shared stack ptr at: %016llx \n", (long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT,"Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void* __kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot,
void **SavedSharedStack,
void **SavedSharedFrame,
int32_t *SavedActiveThreads,
size_t SharingDataSize,
size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized
){
DSPRINT0(DSFLAG,"Entering __kmpc_data_sharing_environment_begin\n");
// If the runtime has been elided, used __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized) return (void *) &DataSharingState;
DSPRINT(DSFLAG,"Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG,"Default Data Size %016llx\n", SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = __ballot(true);
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG,"Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG,"Warp ID: %d\n", WID);
DSPRINT(DSFLAG,"Saved slot ptr at: %016llx \n", (long long)SlotP);
DSPRINT(DSFLAG,"Saved stack ptr at: %016llx \n", (long long)StackP);
DSPRINT(DSFLAG,"Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG,"Active threads: %08x \n", ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG,"Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG,"Default Data Size %016llx\n", SharingDefaultDataSize);
DSPRINT(DSFLAG,"Current Start Address %016llx\n", CurrentStartAddress);
DSPRINT(DSFLAG,"Current End Address %016llx\n", CurrentEndAddress);
DSPRINT(DSFLAG,"Required End Address %016llx\n", RequiredEndAddress);
DSPRINT(DSFLAG,"Active Threads %08x\n", ActiveT);
    // If we require a new slot, allocate it and initialize it (or attempt to
    // reuse one). Also, set the shared stack and slot pointers to the new
    // place. If we do not need to grow the stack, just adapt the stack and
    // frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize) ? SharingDataSize : SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd - (uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG,"Reusing stack slot %016llx\n", (long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG,"Cleaning up -failed reuse - %016llx\n", (long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = ( __kmpc_data_sharing_slot *)malloc(sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG,"New slot allocated %016llx (data size=%016llx)\n", (long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
      // Clean up any old slot that we may still have. Slot producers do not
      // eliminate slots themselves because a slot may still be used to return
      // data.
if (SlotP->Next) {
DSPRINT(DSFLAG,"Cleaning up - old not required - %016llx\n", (long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void*)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG,"Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot,
void **SavedSharedStack,
void **SavedSharedFrame,
int32_t *SavedActiveThreads,
int32_t IsEntryPoint
){
DSPRINT0(DSFLAG,"Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint){
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG,"Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment only for the master.
__kmpc_data_sharing_slot *S = IsMasterThread() ? *SavedSharedSlot : DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG,"Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = __ballot(true);
  // Only the warp master can restore the stack and frame information, and
  // only if there are no other threads left behind in this environment (i.e.
  // the warp diverged and returns in different places). This only works if we
  // assume that threads will converge right after the call site that started
  // the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG,"Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG,"Active threads: %08x; New mask: %08x\n", CurActive, ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG,"Restored slot ptr at: %016llx \n",(long long)SlotP);
DSPRINT(DSFLAG,"Restored stack ptr at: %016llx \n",(long long)StackP);
DSPRINT(DSFLAG,"Restored frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG,"Active threads: %08x \n", ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG,"Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void* __kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized){
DSPRINT0(DSFLAG,"Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized) return (void *) &DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID >> DS_Max_Worker_Warp_Size_Log2;
DSPRINT(DSFLAG,"Source warp: %d\n", SourceWID);
void *P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG,"Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
//EXTERN void __kmpc_samuel_print(int64_t Bla){
// DSPRINT(DSFLAG,"Sam print: %016llx\n",Bla);
//
//}
| 4293e6971af95181c6c953e2107e4c06dd7b57f6.cu | //===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments/
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include <stdio.h>
// Number of threads in the CUDA block.
__device__ static unsigned getNumThreads() {
return blockDim.x;
}
// Thread ID in the CUDA block
__device__ static unsigned getThreadId() {
return threadIdx.x;
}
// Warp ID in the CUDA block
__device__ static unsigned getWarpId() {
return threadIdx.x >> DS_Max_Worker_Warp_Size_Log2;
}
//// Team ID in the CUDA grid
//__device__ static unsigned getTeamId() {
// return blockIdx.x;
//}
// The CUDA thread ID of the master thread.
__device__ static unsigned getMasterThreadId() {
unsigned Mask = DS_Max_Worker_Warp_Size - 1;
return (getNumThreads() - 1) & (~Mask);
}
// The lowest ID among the active threads in the warp.
__device__ static unsigned getWarpMasterActiveThreadId() {
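  // __ballot returns one bit per active lane in the warp. Shifting it left by
  // (32 - lane_id) and popcounting the surviving low 32 bits counts the active
  // lanes with a smaller id, so the first active lane is the one that gets 0.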
unsigned long long Mask = __ballot(true);
unsigned long long ShNum = 32 - (getThreadId() & DS_Max_Worker_Warp_Size_Log2_Mask);
unsigned long long Sh = Mask << ShNum;
return __popc(Sh);
}
// Return true if this is the master thread.
__device__ static bool IsMasterThread() {
return getMasterThreadId() == getThreadId();
}
// Return true if this is the first thread in the warp.
//static bool IsWarpMasterThread() {
// return (getThreadId() & DS_Max_Worker_Warp_Size_Log2_Mask) == 0u;
//}
// Return true if this is the first active thread in the warp.
__device__ static bool IsWarpMasterActiveThread() {
return getWarpMasterActiveThreadId() == 0u;
}
/// Return the provided size aligned to the size of a pointer.
__device__ static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void*);
if (Val & (Align-1)) {
Val += Align;
Val &= ~(Align-1);
}
return Val;
}
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the master thread and warp masters.
// \param RootS: A pointer to the root of the data sharing stack.
// \param InitialDataSize: The initial size of the data in the slot.
EXTERN void __kmpc_initialize_data_sharing_environment(
__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize){
DSPRINT0(DSFLAG_INIT,"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT,"Warp ID: %d\n", WID);
omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void*)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT,"Initial data size: %08x \n", InitialDataSize);
DSPRINT(DSFLAG_INIT,"Root slot at: %016llx \n", (long long)RootS);
DSPRINT(DSFLAG_INIT,"Root slot data-end at: %016llx \n", (long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT,"Root slot next at: %016llx \n", (long long)RootS->Next);
DSPRINT(DSFLAG_INIT,"Shared slot ptr at: %016llx \n", (long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT,"Shared stack ptr at: %016llx \n", (long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT,"Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void* __kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot,
void **SavedSharedStack,
void **SavedSharedFrame,
int32_t *SavedActiveThreads,
size_t SharingDataSize,
size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized
){
DSPRINT0(DSFLAG,"Entering __kmpc_data_sharing_environment_begin\n");
// If the runtime has been elided, used __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized) return (void *) &DataSharingState;
DSPRINT(DSFLAG,"Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG,"Default Data Size %016llx\n", SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = __ballot(true);
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG,"Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG,"Warp ID: %d\n", WID);
DSPRINT(DSFLAG,"Saved slot ptr at: %016llx \n", (long long)SlotP);
DSPRINT(DSFLAG,"Saved stack ptr at: %016llx \n", (long long)StackP);
DSPRINT(DSFLAG,"Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG,"Active threads: %08x \n", ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG,"Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG,"Default Data Size %016llx\n", SharingDefaultDataSize);
DSPRINT(DSFLAG,"Current Start Address %016llx\n", CurrentStartAddress);
DSPRINT(DSFLAG,"Current End Address %016llx\n", CurrentEndAddress);
DSPRINT(DSFLAG,"Required End Address %016llx\n", RequiredEndAddress);
DSPRINT(DSFLAG,"Active Threads %08x\n", ActiveT);
    // If we require a new slot, allocate it and initialize it (or attempt to
    // reuse one). Also, set the shared stack and slot pointers to the new
    // place. If we do not need to grow the stack, just adapt the stack and
    // frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize) ? SharingDataSize : SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd - (uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG,"Reusing stack slot %016llx\n", (long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG,"Cleaning up -failed reuse - %016llx\n", (long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = ( __kmpc_data_sharing_slot *)malloc(sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG,"New slot allocated %016llx (data size=%016llx)\n", (long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
      // Clean up any old slot that we may still have. Slot producers do not
      // eliminate slots themselves because a slot may still be used to return
      // data.
if (SlotP->Next) {
DSPRINT(DSFLAG,"Cleaning up - old not required - %016llx\n", (long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void*)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG,"Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot,
void **SavedSharedStack,
void **SavedSharedFrame,
int32_t *SavedActiveThreads,
int32_t IsEntryPoint
){
DSPRINT0(DSFLAG,"Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint){
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG,"Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment only for the master.
__kmpc_data_sharing_slot *S = IsMasterThread() ? *SavedSharedSlot : DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG,"Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = __ballot(true);
  // Only the warp master can restore the stack and frame information, and
  // only if there are no other threads left behind in this environment (i.e.
  // the warp diverged and returns in different places). This only works if we
  // assume that threads will converge right after the call site that started
  // the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG,"Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG,"Active threads: %08x; New mask: %08x\n", CurActive, ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG,"Restored slot ptr at: %016llx \n",(long long)SlotP);
DSPRINT(DSFLAG,"Restored stack ptr at: %016llx \n",(long long)StackP);
DSPRINT(DSFLAG,"Restored frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG,"Active threads: %08x \n", ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG,"Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void* __kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized){
DSPRINT0(DSFLAG,"Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized) return (void *) &DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID >> DS_Max_Worker_Warp_Size_Log2;
DSPRINT(DSFLAG,"Source warp: %d\n", SourceWID);
void *P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG,"Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
//EXTERN void __kmpc_samuel_print(int64_t Bla){
// DSPRINT(DSFLAG,"Sam print: %016llx\n",Bla);
//
//}
|
846e8571d7b107998ff7cd034222db2d34f45317.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "dali/core/cuda_event.h"
#include "dali/core/cuda_stream.h"
#include "dali/operators/math/expressions/arithmetic_meta.h"
#include "dali/operators/math/expressions/expression_impl_gpu_binary.cuh"
#include "dali/test/dali_operator_test.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/test_tensors.h"
namespace dali {
namespace expr {
template <ArithmeticOp op_ = ArithmeticOp::add, typename Result_ = float, typename Left_ = float,
typename Right_ = float, int IsLeftTensor_ = true, int IsRightTensor_ = false,
int blocks_x_ = 128, int thread_num_ = 32, int batch_size_ = 256, int tile_size_ = 65536,
int sample_size_ = 1024 * 1024>
struct ArithmOpParams {
static constexpr ArithmeticOp op = op_;
using Result = Result_;
using Left = Left_;
using Right = Right_;
static constexpr int IsLeftTensor = IsLeftTensor_;
static constexpr int IsRightTensor = IsRightTensor_;
static constexpr int blocks_x = blocks_x_;
static constexpr int thread_num = thread_num_;
static constexpr int batch_size = batch_size_;
static constexpr int tile_size = tile_size_;
static constexpr int sample_size = sample_size_;
static constexpr int tiles_per_sample = sample_size / tile_size;
static constexpr int num_tiles = batch_size * tiles_per_sample;
static_assert(sample_size >= tile_size, "This test doesn't support samples smaller than tiles.");
};
template <typename TestConfig>
struct BinaryArithmeticOpGpuPerfTest : public ::testing::Test {
void SetUp() override {
stream = HIPStreamMasqueradingAsCUDA::Create(true);
/// Fill tile descriptors (shapes)
tile_descs.resize(TestConfig::num_tiles);
for (int sample_id = 0; sample_id < TestConfig::batch_size; sample_id++) {
for (int extent_id = 0; extent_id < TestConfig::tiles_per_sample; extent_id++) {
int tile_id = sample_id * TestConfig::tiles_per_sample + extent_id;
tile_descs[tile_id].sample_idx = sample_id;
tile_descs[tile_id].offset = TestConfig::tile_size * extent_id;
tile_descs[tile_id].size = TestConfig::tile_size;
}
}
// Reshape memory for those tiles
result.reshape(uniform_list_shape<1>(TestConfig::batch_size,
{TestConfig::tile_size * TestConfig::tiles_per_sample}));
if (TestConfig::IsLeftTensor) {
left.reshape(uniform_list_shape<1>(TestConfig::batch_size,
{TestConfig::tile_size * TestConfig::tiles_per_sample}));
} else {
left.reshape(uniform_list_shape<1>(TestConfig::batch_size, {1}));
}
if (TestConfig::IsRightTensor) {
right.reshape(uniform_list_shape<1>(TestConfig::batch_size,
{TestConfig::tile_size * TestConfig::tiles_per_sample}));
} else {
right.reshape(uniform_list_shape<1>(TestConfig::batch_size, {1}));
}
Left l{};
Right r{};
auto fill_left = [&l]() { return l += 1; };
auto fill_right = [&r]() { return r += 1; };
Fill(left.cpu(), fill_left);
Fill(right.cpu(), fill_right);
// Fill pointers for tiles
samples_data.reshape(uniform_list_shape<1>(1, {TestConfig::batch_size}));
tiles_data.reshape(uniform_list_shape<1>(1, {TestConfig::num_tiles}));
auto samples_cpu = samples_data.cpu()[0];
auto tiles_cpu = tiles_data.cpu()[0];
// TestTensorList just allocates memory, this can leave SmallVector in weird state
memset(samples_cpu.data, 0, TestConfig::batch_size * sizeof(SampleDescGPU<2>));
memset(tiles_cpu.data, 0, TestConfig::num_tiles * sizeof(TileDesc));
for (int sample_idx = 0; sample_idx < TestConfig::batch_size; sample_idx++) {
auto &sample = samples_cpu.data[sample_idx];
sample.ndim = 1;
      auto out_tv = result.gpu()[sample_idx];
TensorShape<> out_strides;
kernels::CalcStrides(out_strides, out_tv.shape);
sample.output.data = out_tv.data;
sample.output.dtype = type2id<Result>::value;
sample.output.shape[0] = out_tv.shape[0];
auto left_tv = left.gpu()[sample_idx];
TensorShape<> left_strides;
kernels::CalcStrides(left_strides, left_tv.shape);
sample.args[0].data = left_tv.data;
sample.args[0].dtype = type2id<Result>::value;
sample.args[0].shape[0] = volume(left_tv.shape);
auto right_tv = right.gpu()[sample_idx];
TensorShape<> right_strides;
kernels::CalcStrides(right_strides, right_tv.shape);
sample.args[1].data = right_tv.data;
sample.args[1].dtype = type2id<Result>::value;
sample.args[1].shape[0] = volume(right_tv.shape);
for (int extent_idx = 0; extent_idx < TestConfig::tiles_per_sample; extent_idx++) {
int tile_idx = sample_idx * TestConfig::tiles_per_sample + extent_idx;
tiles_cpu.data[tile_idx] = tile_descs[tile_idx];
}
}
tiles_gpu = tiles_data.gpu(stream)[0].data;
samples_gpu = samples_data.gpu(stream)[0].data;
}
void MeasurePerf() {
hipLaunchKernelGGL(( ExecuteTiledBinOpND<TestConfig::op, Result, Left, Right>)
, dim3(grid), dim3(block), 0, stream, samples_gpu, tiles_gpu);
CUDAEvent start = CUDAEvent::CreateWithFlags(0);
CUDAEvent end = CUDAEvent::CreateWithFlags(0);
CUDA_CALL(hipEventRecord(start, stream));
constexpr int kIters = 100;
for (int i = 0; i < kIters; i++) {
hipLaunchKernelGGL(( ExecuteTiledBinOpND<TestConfig::op, Result, Left, Right>)
, dim3(grid), dim3(block), 0, stream, samples_gpu, tiles_gpu);
}
CUDA_CALL(hipEventRecord(end, stream));
CUDA_CALL(hipDeviceSynchronize());
float time;
CUDA_CALL(hipEventElapsedTime(&time, start, end));
time *= (1e+6f / kIters); // convert the time for kIters launches from ms to ns per launch
int64_t data_size = 0;
data_size +=
static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Result);
if (TestConfig::IsLeftTensor)
data_size +=
static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Left);
if (TestConfig::IsRightTensor)
data_size +=
static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Right);
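// data_size is the number of bytes read and written per launch, and time is in
// nanoseconds per launch, so the ratio below comes out directly in GB/s.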
std::cerr << "Throughput: " << data_size / time << " GB/s\n";
}
using Result = typename TestConfig::Result;
using Left = typename TestConfig::Left;
using Right = typename TestConfig::Right;
// For kernel launch
dim3 grid = dim3(TestConfig::blocks_x, TestConfig::num_tiles, 1);
dim3 block = dim3(TestConfig::thread_num, 1, 1);
// Samples, tiles and data
std::vector<TileDesc> tile_descs;
kernels::TestTensorList<TileDesc, 1> tiles_data;
kernels::TestTensorList<SampleDescGPU<2>, 1> samples_data;
kernels::TestTensorList<Result, 1> result;
kernels::TestTensorList<Left, 1> left;
kernels::TestTensorList<Right, 1> right;
HIPStreamMasqueradingAsCUDA stream;
const SampleDescGPU<2> *samples_gpu;
const TileDesc *tiles_gpu;
};
TYPED_TEST_SUITE_P(BinaryArithmeticOpGpuPerfTest);
TYPED_TEST_P(BinaryArithmeticOpGpuPerfTest, Perf) {
std::cerr << "Blocks_x: " << TypeParam::blocks_x << ", thread_num: " << TypeParam::thread_num
<< ", tile_size: " << TypeParam::tile_size / 1024.f
<< "KB, sample_size: " << TypeParam::sample_size / 1048576.f << "MB" << std::endl;
// TypeParam n = 0;
this->MeasurePerf();
}
REGISTER_TYPED_TEST_SUITE_P(BinaryArithmeticOpGpuPerfTest, Perf);
using TestConfigs = ::testing::Types<
// op, Result, Left, Right, IsLeftTensor, IsRightTensor, blocks_x, thread_num, batch, tile,
// sample Test Tensor op Constant
ArithmOpParams< // old config
ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 16384, 1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 32768,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 131072,
1024 * 1024>,
// test small input data, forcing 1 tile per sample, a bit bigger batch,
// to measure how performs with smaller inputs
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 16384,
16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 32768,
32768>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 65536,
65536>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 16384,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 32768,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 131072,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 16384, 16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 32768, 32768>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 65536, 65536>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 16384,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 32768,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 131072,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 16384,
16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 32768,
32768>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 65536,
65536>,
// Test Tensor op Tensor
ArithmOpParams< // old config
ArithmeticOp::add, float, float, float, true, true, 128, 256, 256, 16384, 1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 256, 512, 16384, 16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 64, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 64, 256, 512, 16384, 16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 128, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 128, 512, 16384,
16384>>;
INSTANTIATE_TYPED_TEST_SUITE_P(BinaryArithmeticOpGpu, BinaryArithmeticOpGpuPerfTest, TestConfigs);
} // namespace expr
} // namespace dali
| 846e8571d7b107998ff7cd034222db2d34f45317.cu | // Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "dali/core/cuda_event.h"
#include "dali/core/cuda_stream.h"
#include "dali/operators/math/expressions/arithmetic_meta.h"
#include "dali/operators/math/expressions/expression_impl_gpu_binary.cuh"
#include "dali/test/dali_operator_test.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/test_tensors.h"
namespace dali {
namespace expr {
template <ArithmeticOp op_ = ArithmeticOp::add, typename Result_ = float, typename Left_ = float,
typename Right_ = float, int IsLeftTensor_ = true, int IsRightTensor_ = false,
int blocks_x_ = 128, int thread_num_ = 32, int batch_size_ = 256, int tile_size_ = 65536,
int sample_size_ = 1024 * 1024>
struct ArithmOpParams {
static constexpr ArithmeticOp op = op_;
using Result = Result_;
using Left = Left_;
using Right = Right_;
static constexpr int IsLeftTensor = IsLeftTensor_;
static constexpr int IsRightTensor = IsRightTensor_;
static constexpr int blocks_x = blocks_x_;
static constexpr int thread_num = thread_num_;
static constexpr int batch_size = batch_size_;
static constexpr int tile_size = tile_size_;
static constexpr int sample_size = sample_size_;
static constexpr int tiles_per_sample = sample_size / tile_size;
static constexpr int num_tiles = batch_size * tiles_per_sample;
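// For example, the largest configs below use sample_size = 1024 * 1024 elements with
// tile_size = 65536, which works out to 16 tiles per sample.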
static_assert(sample_size >= tile_size, "This test doesn't support samples smaller than tiles.");
};
template <typename TestConfig>
struct BinaryArithmeticOpGpuPerfTest : public ::testing::Test {
void SetUp() override {
stream = CUDAStream::Create(true);
/// Fill tile descriptors (shapes)
tile_descs.resize(TestConfig::num_tiles);
for (int sample_id = 0; sample_id < TestConfig::batch_size; sample_id++) {
for (int extent_id = 0; extent_id < TestConfig::tiles_per_sample; extent_id++) {
int tile_id = sample_id * TestConfig::tiles_per_sample + extent_id;
tile_descs[tile_id].sample_idx = sample_id;
tile_descs[tile_id].offset = TestConfig::tile_size * extent_id;
tile_descs[tile_id].size = TestConfig::tile_size;
}
}
// Reshape memory for those tiles
result.reshape(uniform_list_shape<1>(TestConfig::batch_size,
{TestConfig::tile_size * TestConfig::tiles_per_sample}));
if (TestConfig::IsLeftTensor) {
left.reshape(uniform_list_shape<1>(TestConfig::batch_size,
{TestConfig::tile_size * TestConfig::tiles_per_sample}));
} else {
left.reshape(uniform_list_shape<1>(TestConfig::batch_size, {1}));
}
if (TestConfig::IsRightTensor) {
right.reshape(uniform_list_shape<1>(TestConfig::batch_size,
{TestConfig::tile_size * TestConfig::tiles_per_sample}));
} else {
right.reshape(uniform_list_shape<1>(TestConfig::batch_size, {1}));
}
Left l{};
Right r{};
auto fill_left = [&l]() { return l += 1; };
auto fill_right = [&r]() { return r += 1; };
Fill(left.cpu(), fill_left);
Fill(right.cpu(), fill_right);
// Fill pointers for tiles
samples_data.reshape(uniform_list_shape<1>(1, {TestConfig::batch_size}));
tiles_data.reshape(uniform_list_shape<1>(1, {TestConfig::num_tiles}));
auto samples_cpu = samples_data.cpu()[0];
auto tiles_cpu = tiles_data.cpu()[0];
// TestTensorList just allocates memory, this can leave SmallVector in weird state
memset(samples_cpu.data, 0, TestConfig::batch_size * sizeof(SampleDescGPU<2>));
memset(tiles_cpu.data, 0, TestConfig::num_tiles * sizeof(TileDesc));
for (int sample_idx = 0; sample_idx < TestConfig::batch_size; sample_idx++) {
auto &sample = samples_cpu.data[sample_idx];
sample.ndim = 1;
auto out_tv = result.gpu()[sample_idx];
TensorShape<> out_strides;
kernels::CalcStrides(out_strides, out_tv.shape);
sample.output.data = out_tv.data;
sample.output.dtype = type2id<Result>::value;
sample.output.shape[0] = out_tv.shape[0];
auto left_tv = left.gpu()[sample_idx];
TensorShape<> left_strides;
kernels::CalcStrides(left_strides, left_tv.shape);
sample.args[0].data = left_tv.data;
sample.args[0].dtype = type2id<Result>::value;
sample.args[0].shape[0] = volume(left_tv.shape);
auto right_tv = right.gpu()[sample_idx];
TensorShape<> right_strides;
kernels::CalcStrides(right_strides, right_tv.shape);
sample.args[1].data = right_tv.data;
sample.args[1].dtype = type2id<Result>::value;
sample.args[1].shape[0] = volume(right_tv.shape);
for (int extent_idx = 0; extent_idx < TestConfig::tiles_per_sample; extent_idx++) {
int tile_idx = sample_idx * TestConfig::tiles_per_sample + extent_idx;
tiles_cpu.data[tile_idx] = tile_descs[tile_idx];
}
}
tiles_gpu = tiles_data.gpu(stream)[0].data;
samples_gpu = samples_data.gpu(stream)[0].data;
}
void MeasurePerf() {
ExecuteTiledBinOpND<TestConfig::op, Result, Left, Right>
<<<grid, block, 0, stream>>>(samples_gpu, tiles_gpu);
CUDAEvent start = CUDAEvent::CreateWithFlags(0);
CUDAEvent end = CUDAEvent::CreateWithFlags(0);
CUDA_CALL(cudaEventRecord(start, stream));
constexpr int kIters = 100;
for (int i = 0; i < kIters; i++) {
ExecuteTiledBinOpND<TestConfig::op, Result, Left, Right>
<<<grid, block, 0, stream>>>(samples_gpu, tiles_gpu);
}
CUDA_CALL(cudaEventRecord(end, stream));
CUDA_CALL(cudaDeviceSynchronize());
float time;
CUDA_CALL(cudaEventElapsedTime(&time, start, end));
time *= (1e+6f / kIters); // convert the time for kIters launches from ms to ns per launch
int64_t data_size = 0;
data_size +=
static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Result);
if (TestConfig::IsLeftTensor)
data_size +=
static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Left);
if (TestConfig::IsRightTensor)
data_size +=
static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Right);
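// data_size is the number of bytes read and written per launch, and time is in
// nanoseconds per launch, so the ratio below comes out directly in GB/s.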
std::cerr << "Throughput: " << data_size / time << " GB/s\n";
}
using Result = typename TestConfig::Result;
using Left = typename TestConfig::Left;
using Right = typename TestConfig::Right;
// For kernel launch
dim3 grid = dim3(TestConfig::blocks_x, TestConfig::num_tiles, 1);
dim3 block = dim3(TestConfig::thread_num, 1, 1);
// Samples, tiles and data
std::vector<TileDesc> tile_descs;
kernels::TestTensorList<TileDesc, 1> tiles_data;
kernels::TestTensorList<SampleDescGPU<2>, 1> samples_data;
kernels::TestTensorList<Result, 1> result;
kernels::TestTensorList<Left, 1> left;
kernels::TestTensorList<Right, 1> right;
CUDAStream stream;
const SampleDescGPU<2> *samples_gpu;
const TileDesc *tiles_gpu;
};
TYPED_TEST_SUITE_P(BinaryArithmeticOpGpuPerfTest);
TYPED_TEST_P(BinaryArithmeticOpGpuPerfTest, Perf) {
std::cerr << "Blocks_x: " << TypeParam::blocks_x << ", thread_num: " << TypeParam::thread_num
<< ", tile_size: " << TypeParam::tile_size / 1024.f
<< "KB, sample_size: " << TypeParam::sample_size / 1048576.f << "MB" << std::endl;
// TypeParam n = 0;
this->MeasurePerf();
}
REGISTER_TYPED_TEST_SUITE_P(BinaryArithmeticOpGpuPerfTest, Perf);
using TestConfigs = ::testing::Types<
// op, Result, Left, Right, IsLeftTensor, IsRightTensor, blocks_x, thread_num, batch, tile,
// sample Test Tensor op Constant
ArithmOpParams< // old config
ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 16384, 1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 32768,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 131072,
1024 * 1024>,
// test small input data, forcing 1 tile per sample, a bit bigger batch,
// to measure how performs with smaller inputs
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 16384,
16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 32768,
32768>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 65536,
65536>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 16384,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 32768,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 131072,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 16384, 16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 32768, 32768>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 65536, 65536>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 16384,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 32768,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 131072,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 16384,
16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 32768,
32768>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 65536,
65536>,
// Test Tensor op Tensor
ArithmOpParams< // old config
ArithmeticOp::add, float, float, float, true, true, 128, 256, 256, 16384, 1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 256, 512, 16384, 16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 64, 256, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 64, 256, 512, 16384, 16384>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 128, 256, 65536,
1024 * 1024>,
ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 128, 512, 16384,
16384>>;
INSTANTIATE_TYPED_TEST_SUITE_P(BinaryArithmeticOpGpu, BinaryArithmeticOpGpuPerfTest, TestConfigs);
} // namespace expr
} // namespace dali
|
f7afad4587631f872710052693fde94f119beaeb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#ifdef _FLOAT_
typedef float basetype;
#define labelelem "floats"
#elif _DOUBLE_
typedef double basetype;
#define labelelem "doubles"
#else
typedef int basetype;// Element type: int by default
#define labelelem "ints"
#endif
/* Timing helpers */
#include <sys/time.h>
#include <sys/resource.h>
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
/* End of timing helpers */
void init_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = (basetype)i;
}
}
void transponer_matriz_CPU(basetype *m, basetype *t, int n)
{
for (int i = 0; i < n; ++i){
for (int j = 0; j < n; ++j){
t[i+j*n] = m[i*n+j];
}
}
}
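// Both the CPU and GPU versions assume row-major storage: element (i, j) of the
// n x n matrix lives at index i*n + j, and its transpose is written to j*n + i.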
__global__ void transponer_kernel_cuda(basetype * m, basetype * t, const int n){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n*n){
// each thread transposes one element: m is row-major, so element (row, col)
// of m is written to (col, row) of t
int row = global_id / n;
int col = global_id % n;
t[col * n + row] = m[global_id];
}
}
int transponer_matriz_GPU(basetype *m,basetype *t, int n)
{
double timetick;
int blk_size=64;
hipError_t error;
// Number of bytes in each n x n matrix
unsigned int numBytes = n *n * sizeof(basetype);
// Reserve global device (GPU) memory for the matrices and copy the input over
basetype *gm, *gt;
timetick = dwalltime();
hipMalloc((void **) &gm, numBytes);
hipMalloc((void **) &gt, numBytes);
printf("-> Tiempo de alocacion en memoria global de GPU %f\n", dwalltime() - timetick);
timetick = dwalltime();
hipMemcpy(gm, m, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
printf("-> Tiempo de copia de memoria CPU =>> GPU %f\n", dwalltime() - timetick);
// Bloque unidimensional de hilos (*blk_size* hilos)
dim3 dimBlock(blk_size);
// Grid unidimensional (*ceil(n/blk_size)* bloques)
dim3 dimGrid((((n + dimBlock.x - 1) / dimBlock.x)));
//printf("%d %d",dimBlock.x,dimGrid.x);
// Lanzamos ejecucin del kernel en la GPU
//timestamp(start); // Medimos tiempo de clculo en GPU
timetick = dwalltime();
hipLaunchKernelGGL(( transponer_kernel_cuda), dim3(dimGrid), dim3(dimBlock), 0, 0, gm,gt,n);
error=hipDeviceSynchronize();
printf("%s\n", hipGetErrorString(error));
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
//timestamp(end);
// Move the result back: GPU -> CPU
timetick = dwalltime();
hipMemcpy(t, gt, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);
// Liberamos memoria global del device utilizada
hipFree (gm);
hipFree (gt);
return 0;
}
int main(int argc, char const *argv[])
{
basetype *m;
basetype *t;
int b=3; //=1024;
int n=b*b;
double timetick;
// Allocate host memory for the matrices
timetick = dwalltime();
m=(basetype *) malloc(n*sizeof(basetype));
t=(basetype *) malloc(n*sizeof(basetype));
printf("-> Host allocation time on CPU %f\n", dwalltime() - timetick);
// Initialize the input matrix
// timetick = dwalltime();
init_CPU_array(m,n);
// printf("-> Initialization time on CPU %f\n", dwalltime() - timetick);
// Transpose the matrix on the CPU (reference implementation)
timetick = dwalltime();
transponer_matriz_CPU(m,t,b);
printf("-> Matrix transpose time on CPU %f\n", dwalltime() - timetick);
transponer_matriz_GPU(m,t,b);
/* for (int i = 0; i < n; ++i)
{
printf("%d\n", m[i]);
}
printf("%p\n", t);
for (int i = 0; i < n; ++i)
{
printf("%d\n", t[i]);
}
*/
return 0;
}
| f7afad4587631f872710052693fde94f119beaeb.cu | #include <stdio.h>
#include <stdlib.h>
#ifdef _FLOAT_
typedef float basetype;
#define labelelem "floats"
#elif _DOUBLE_
typedef double basetype;
#define labelelem "doubles"
#else
typedef int basetype;// Element type: int by default
#define labelelem "ints"
#endif
/* Timing helpers */
#include <sys/time.h>
#include <sys/resource.h>
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
/* End of timing helpers */
void init_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = (basetype)i;
}
}
void transponer_matriz_CPU(basetype *m, basetype *t, int n)
{
for (int i = 0; i < n; ++i){
for (int j = 0; j < n; ++j){
t[i+j*n] = m[i*n+j];
}
}
}
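// Both the CPU and GPU versions assume row-major storage: element (i, j) of the
// n x n matrix lives at index i*n + j, and its transpose is written to j*n + i.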
__global__ void transponer_kernel_cuda(basetype * m, basetype * t, const int n){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n*n){
// each thread transposes one element: m is row-major, so element (row, col)
// of m is written to (col, row) of t
int row = global_id / n;
int col = global_id % n;
t[col * n + row] = m[global_id];
}
}
int transponer_matriz_GPU(basetype *m,basetype *t, int n)
{
double timetick;
int blk_size=64;
cudaError_t error;
// Number of bytes in each n x n matrix
unsigned int numBytes = n *n * sizeof(basetype);
// Reserve global device (GPU) memory for the matrices and copy the input over
basetype *gm, *gt;
timetick = dwalltime();
cudaMalloc((void **) &gm, numBytes);
cudaMalloc((void **) &gt, numBytes);
printf("-> Tiempo de alocacion en memoria global de GPU %f\n", dwalltime() - timetick);
timetick = dwalltime();
cudaMemcpy(gm, m, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
printf("-> Tiempo de copia de memoria CPU =>> GPU %f\n", dwalltime() - timetick);
// Bloque unidimensional de hilos (*blk_size* hilos)
dim3 dimBlock(blk_size);
// Grid unidimensional (*ceil(n/blk_size)* bloques)
dim3 dimGrid((((n + dimBlock.x - 1) / dimBlock.x)));
//printf("%d %d",dimBlock.x,dimGrid.x);
// Lanzamos ejecución del kernel en la GPU
//timestamp(start); // Medimos tiempo de cálculo en GPU
timetick = dwalltime();
transponer_kernel_cuda<<<dimGrid, dimBlock>>>(gm,gt,n);
error=cudaDeviceSynchronize();
printf("%s\n", cudaGetErrorString(error));
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
//timestamp(end);
// Move the result back: GPU -> CPU
timetick = dwalltime();
cudaMemcpy(t, gt, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);
// Liberamos memoria global del device utilizada
cudaFree (gm);
cudaFree (gt);
return 0;
}
int main(int argc, char const *argv[])
{
basetype *m;
basetype *t;
int b=3; //=1024;
int n=b*b;
double timetick;
// Allocate host memory for the matrices
timetick = dwalltime();
m=(basetype *) malloc(n*sizeof(basetype));
t=(basetype *) malloc(n*sizeof(basetype));
printf("-> Host allocation time on CPU %f\n", dwalltime() - timetick);
// Initialize the input matrix
// timetick = dwalltime();
init_CPU_array(m,n);
// printf("-> Initialization time on CPU %f\n", dwalltime() - timetick);
// Transpose the matrix on the CPU (reference implementation)
timetick = dwalltime();
transponer_matriz_CPU(m,t,b);
printf("-> Matrix transpose time on CPU %f\n", dwalltime() - timetick);
transponer_matriz_GPU(m,t,b);
/* for (int i = 0; i < n; ++i)
{
printf("%d\n", m[i]);
}
printf("%p\n", t);
for (int i = 0; i < n; ++i)
{
printf("%d\n", t[i]);
}
*/
return 0;
}
|
9e11423fe00c374ac73c3d471ebe3c49475e4ad9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#include <time.h> /* for time(), used to seed the RNG in main() */
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
__constant__ double c_CDF[1000];
__constant__ double c_arrayY[1000];
__constant__ double c_arrayX[1000];
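/* Note: these __constant__ copies hold at most 1000 entries each, so this
   version implicitly assumes Nparticles <= 1000. */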
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
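/* Each thread scans the CDF stored in __constant__ memory (c_CDF) linearly for
   the first entry >= its u value; findIndexBin() above is a binary-search
   alternative that this kernel does not use. */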
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(c_CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = c_arrayX[index];
yj[i] = c_arrayY[index];
}
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1; /* round up: the fractional part is >= .5 */
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
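/* One LCG step: seed <- (A*seed + C) mod M, then scale the state into [0, 1).
   The signed multiply can overflow and go negative, which is why the quotient
   below is passed through fabs(). */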
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
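/* Draw u, v ~ U(0,1) and return sqrt(-2*ln(u)) * cos(2*pi*v), a standard normal
   sample; the second Box-Muller output (the sine term) is discarded. */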
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
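// likelihood[x] is a log-likelihood ratio, so exp() turns it back into a
// multiplicative factor; the weights are renormalized to sum to 1 just below.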
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
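// Systematic resampling: build the cumulative sum of the weights, draw one offset
// u1 in [0, 1/Nparticles), probe the CDF at u1 + x/Nparticles for every particle x,
// and let the GPU kernel perform the index lookups and copy the selected particles.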
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_arrayX,arrayX,sizeof(double)*Nparticles);
hipMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles);
hipMemcpyToSymbol(c_arrayY,arrayY,sizeof(double)*Nparticles);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL
hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
hipDeviceSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
hipFree(u_GPU);
hipFree(CDF_GPU);
hipFree(yj_GPU);
hipFree(xj_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
//check args deliminators
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to a integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| 9e11423fe00c374ac73c3d471ebe3c49475e4ad9.cu | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#include <time.h> /* for time(), used to seed the RNG in main() */
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
__constant__ double c_CDF[1000];
__constant__ double c_arrayY[1000];
__constant__ double c_arrayX[1000];
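/* Note: these __constant__ copies hold at most 1000 entries each, so this
   version implicitly assumes Nparticles <= 1000. */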
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
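/* Each thread scans the CDF stored in __constant__ memory (c_CDF) linearly for
   the first entry >= its u value; findIndexBin() above is a binary-search
   alternative that this kernel does not use. */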
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(c_CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = c_arrayX[index];
yj[i] = c_arrayY[index];
}
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1; /* round up: the fractional part is >= .5 */
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
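/* One LCG step: seed <- (A*seed + C) mod M, then scale the state into [0, 1).
   The signed multiply can overflow and go negative, which is why the quotient
   below is passed through fabs(). */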
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
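/* Draw u, v ~ U(0,1) and return sqrt(-2*ln(u)) * cos(2*pi*v), a standard normal
   sample; the second Box-Muller output (the sine term) is discarded. */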
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
		//draw samples from the motion model (random walk); the only prior knowledge
		//is that the object drifts about twice as fast in the y direction as in the x direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_arrayX,arrayX,sizeof(double)*Nparticles);
cudaMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles);
cudaMemcpyToSymbol(c_arrayY,arrayY,sizeof(double)*Nparticles);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL
kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
cudaThreadSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
cudaFree(u_GPU);
cudaFree(CDF_GPU);
cudaFree(yj_GPU);
cudaFree(xj_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
//check args deliminators
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to a integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
0552ff58c4b2f76bf50b36436fa3b92d0d44f0b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simulate elongation of Xenopus aggregates, see Green (2014) Dev Dyn.
#include <hiprand/hiprand_kernel.h>
#include <math.h>
#include <stdio.h>
#include <thread>
#include "../include/cudebug.cuh"
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/links.cuh"
#include "../include/polarity.cuh"
#include "../include/solvers.cuh"
#include "../include/vtk.cuh"
const auto n_cells = 500;
const auto r_protrusion = 1.5f;
const auto prots_per_cell = 1;
const auto n_time_steps = 500;
const auto dt = 0.2f;
__device__ Po_cell lb_force(Po_cell Xi, Po_cell r, float dist, int i, int j)
{
Po_cell dF{0};
if (i == j) return dF;
if (dist > 1) return dF;
auto F = fmaxf(0.7 - dist, 0) * 2 - fmaxf(dist - 0.8, 0) / 2;
dF.x = r.x * F / dist;
dF.y = r.y * F / dist;
dF.z = r.z * F / dist;
return dF;
}
__device__ void protrusion_force(const Po_cell* __restrict__ d_X, const int a,
const int b, const float strength, Po_cell* d_dX)
{
auto r = d_X[a] - d_X[b];
auto dist = norm3df(r.x, r.y, r.z);
atomicAdd(&d_dX[a].x, -strength * r.x / dist);
atomicAdd(&d_dX[a].y, -strength * r.y / dist);
atomicAdd(&d_dX[a].z, -strength * r.z / dist);
atomicAdd(&d_dX[b].x, strength * r.x / dist);
atomicAdd(&d_dX[b].y, strength * r.y / dist);
atomicAdd(&d_dX[b].z, strength * r.z / dist);
Polarity r_hat{acosf(-r.z / dist), atan2(-r.y, -r.x)};
auto Fa = bidirectional_polarization_force(d_X[a], r_hat);
atomicAdd(&d_dX[a].theta, strength * Fa.theta);
atomicAdd(&d_dX[a].phi, strength * Fa.phi);
auto Fb = bidirectional_polarization_force(d_X[b], r_hat);
atomicAdd(&d_dX[b].theta, strength * Fb.theta);
atomicAdd(&d_dX[b].phi, strength * Fb.phi);
}
__global__ void update_protrusions(const Grid* __restrict__ d_grid,
const Po_cell* __restrict d_X, hiprandState_t* d_state, Link* d_link)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_cells * prots_per_cell) return;
auto j = static_cast<int>((i + 0.5) / prots_per_cell);
auto rnd_cube =
d_grid->d_cube_id[j] +
d_nhood[min(static_cast<int>(hiprand_uniform(&d_state[i]) * 27), 26)];
auto cells_in_cube =
d_grid->d_cube_end[rnd_cube] - d_grid->d_cube_start[rnd_cube];
if (cells_in_cube < 1) return;
auto rnd_cell =
min(static_cast<int>(hiprand_uniform(&d_state[i]) * cells_in_cube),
cells_in_cube - 1);
auto a = d_grid->d_point_id[j];
auto b = d_grid->d_point_id[d_grid->d_cube_start[rnd_cube] + rnd_cell];
D_ASSERT(a >= 0);
D_ASSERT(a < n_cells);
D_ASSERT(b >= 0);
D_ASSERT(b < n_cells);
if (a == b) return;
auto r = d_X[a] - d_X[b];
auto dist = norm3df(r.x, r.y, r.z);
if (dist > r_protrusion) return;
Polarity r_hat{acosf(-r.z / dist), atan2(-r.y, -r.x)};
auto from_front_a = pol_dot_product(d_X[a], r_hat) > 0.7 / 2;
auto to_back_b = pol_dot_product(d_X[b], r_hat) > 0.7 / 2;
if ((from_front_a and to_back_b)) {
d_link[a * prots_per_cell + i % prots_per_cell].a = a;
d_link[a * prots_per_cell + i % prots_per_cell].b = b;
}
}
int main(int argc, const char* argv[])
{
// Prepare initial state
Solution<Po_cell, Grid_solver> cells{n_cells};
random_disk(0.733333, cells);
for (auto i = 0; i < n_cells; i++) {
cells.h_X[i].x = cells.h_X[i].z;
cells.h_X[i].z = rand() / (RAND_MAX + 1.) / 2;
cells.h_X[i].theta = M_PI / 2 + (rand() / (RAND_MAX + 1.) - 0.5) / 2;
// cells.h_X[i].phi = 2.*M_PI*rand()/(RAND_MAX + 1.);
auto phi = atan2(-cells.h_X[i].y, -cells.h_X[i].x);
cells.h_X[i].phi = phi + M_PI / 2;
}
cells.copy_to_device();
Links protrusions{n_cells * prots_per_cell};
auto intercalation = [&protrusions](
const Po_cell* __restrict__ d_X, Po_cell* d_dX) {
return link_forces(protrusions, d_X, d_dX);
};
// Simulate elongation
Vtk_output output{"aggregate"};
Grid grid{n_cells};
for (auto time_step = 0; time_step <= n_time_steps; time_step++) {
cells.copy_to_host();
protrusions.copy_to_host();
grid.build(cells, r_protrusion);
hipLaunchKernelGGL(( update_protrusions), dim3((protrusions.get_d_n() + 32 - 1) / 32), dim3(32), 0, 0,
grid.d_grid, cells.d_X, protrusions.d_state, protrusions.d_link);
cells.take_step<lb_force>(dt, intercalation);
output.write_positions(cells);
output.write_links(protrusions);
output.write_polarity(cells);
}
return 0;
}
| 0552ff58c4b2f76bf50b36436fa3b92d0d44f0b7.cu | // Simulate elongation of Xenopus aggregates, see Green (2014) Dev Dyn.
#include <curand_kernel.h>
#include <math.h>
#include <stdio.h>
#include <thread>
#include "../include/cudebug.cuh"
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/links.cuh"
#include "../include/polarity.cuh"
#include "../include/solvers.cuh"
#include "../include/vtk.cuh"
const auto n_cells = 500;
const auto r_protrusion = 1.5f;
const auto prots_per_cell = 1;
const auto n_time_steps = 500;
const auto dt = 0.2f;
__device__ Po_cell lb_force(Po_cell Xi, Po_cell r, float dist, int i, int j)
{
Po_cell dF{0};
if (i == j) return dF;
if (dist > 1) return dF;
auto F = fmaxf(0.7 - dist, 0) * 2 - fmaxf(dist - 0.8, 0) / 2;
dF.x = r.x * F / dist;
dF.y = r.y * F / dist;
dF.z = r.z * F / dist;
return dF;
}
__device__ void protrusion_force(const Po_cell* __restrict__ d_X, const int a,
const int b, const float strength, Po_cell* d_dX)
{
auto r = d_X[a] - d_X[b];
auto dist = norm3df(r.x, r.y, r.z);
atomicAdd(&d_dX[a].x, -strength * r.x / dist);
atomicAdd(&d_dX[a].y, -strength * r.y / dist);
atomicAdd(&d_dX[a].z, -strength * r.z / dist);
atomicAdd(&d_dX[b].x, strength * r.x / dist);
atomicAdd(&d_dX[b].y, strength * r.y / dist);
atomicAdd(&d_dX[b].z, strength * r.z / dist);
Polarity r_hat{acosf(-r.z / dist), atan2(-r.y, -r.x)};
auto Fa = bidirectional_polarization_force(d_X[a], r_hat);
atomicAdd(&d_dX[a].theta, strength * Fa.theta);
atomicAdd(&d_dX[a].phi, strength * Fa.phi);
auto Fb = bidirectional_polarization_force(d_X[b], r_hat);
atomicAdd(&d_dX[b].theta, strength * Fb.theta);
atomicAdd(&d_dX[b].phi, strength * Fb.phi);
}
__global__ void update_protrusions(const Grid* __restrict__ d_grid,
const Po_cell* __restrict d_X, curandState* d_state, Link* d_link)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_cells * prots_per_cell) return;
auto j = static_cast<int>((i + 0.5) / prots_per_cell);
auto rnd_cube =
d_grid->d_cube_id[j] +
d_nhood[min(static_cast<int>(curand_uniform(&d_state[i]) * 27), 26)];
auto cells_in_cube =
d_grid->d_cube_end[rnd_cube] - d_grid->d_cube_start[rnd_cube];
if (cells_in_cube < 1) return;
auto rnd_cell =
min(static_cast<int>(curand_uniform(&d_state[i]) * cells_in_cube),
cells_in_cube - 1);
auto a = d_grid->d_point_id[j];
auto b = d_grid->d_point_id[d_grid->d_cube_start[rnd_cube] + rnd_cell];
D_ASSERT(a >= 0);
D_ASSERT(a < n_cells);
D_ASSERT(b >= 0);
D_ASSERT(b < n_cells);
if (a == b) return;
auto r = d_X[a] - d_X[b];
auto dist = norm3df(r.x, r.y, r.z);
if (dist > r_protrusion) return;
Polarity r_hat{acosf(-r.z / dist), atan2(-r.y, -r.x)};
auto from_front_a = pol_dot_product(d_X[a], r_hat) > 0.7 / 2;
auto to_back_b = pol_dot_product(d_X[b], r_hat) > 0.7 / 2;
if ((from_front_a and to_back_b)) {
d_link[a * prots_per_cell + i % prots_per_cell].a = a;
d_link[a * prots_per_cell + i % prots_per_cell].b = b;
}
}
int main(int argc, const char* argv[])
{
// Prepare initial state
Solution<Po_cell, Grid_solver> cells{n_cells};
random_disk(0.733333, cells);
for (auto i = 0; i < n_cells; i++) {
cells.h_X[i].x = cells.h_X[i].z;
cells.h_X[i].z = rand() / (RAND_MAX + 1.) / 2;
cells.h_X[i].theta = M_PI / 2 + (rand() / (RAND_MAX + 1.) - 0.5) / 2;
// cells.h_X[i].phi = 2.*M_PI*rand()/(RAND_MAX + 1.);
auto phi = atan2(-cells.h_X[i].y, -cells.h_X[i].x);
cells.h_X[i].phi = phi + M_PI / 2;
}
cells.copy_to_device();
Links protrusions{n_cells * prots_per_cell};
auto intercalation = [&protrusions](
const Po_cell* __restrict__ d_X, Po_cell* d_dX) {
return link_forces(protrusions, d_X, d_dX);
};
// Simulate elongation
Vtk_output output{"aggregate"};
Grid grid{n_cells};
for (auto time_step = 0; time_step <= n_time_steps; time_step++) {
cells.copy_to_host();
protrusions.copy_to_host();
grid.build(cells, r_protrusion);
update_protrusions<<<(protrusions.get_d_n() + 32 - 1) / 32, 32>>>(
grid.d_grid, cells.d_X, protrusions.d_state, protrusions.d_link);
cells.take_step<lb_force>(dt, intercalation);
output.write_positions(cells);
output.write_links(protrusions);
output.write_polarity(cells);
}
return 0;
}
|
309b160593d25a1d7ee60bbb04b473809323f4c3.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
{
S val;
explicit MulScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a * val);
}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
| 309b160593d25a1d7ee60bbb04b473809323f4c3.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
{
S val;
explicit MulScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a * val);
}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
deefd56f32aea98a280edea57cae1d9da44a613f.hip | // !!! This is a file automatically generated by hipify!!!
/**
 Algorithms and data structures learned with CUDA
 Optimizing the N-Queens problem step by step
 Iichiro Suzuki, Information Technology Bureau, Kyodo News ([email protected])
 Compile and run:
 $ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g|-s)
 -c: cpu (non-recursive)
 -r: cpu recursive
 -g: GPU
 -s: SGPU (believed to be the Somers version)
 Steps: 1. brute force 2.: 3. backtracking (array) 4.: 5.:
 6. backtracking (bitmap) 7. mirror 8. symmetry removal
 9. branching on the queen's position BOUND1 10. branching on BOUND1,2
 11. pruning 12. optimization 13. parallel processing
 This step: 6. backtracking + bit (n27)
 Queens are pre-placed on the two outer rows/columns on every side of the
 board (bit(n27)); the remaining solutions are then found by backtracking.
 Backtracking: a flag is kept for every column and diagonal that tells
 whether a queen already attacks it, so a branch is abandoned as soon as a
 partial placement violates a constraint and the depth-first search never
 has to visit every leaf.
 Bit (n27): the state is packed into bitmaps and handled with bit
 operations, roughly 20-30 times faster than plain backtracking; a bitmap
 is moved with a single shift, so a move costs O(1) instead of the O(N) of
 a flag array. (There are 2*N-1 diagonals in each direction on an N x N board.)
flags -flags & flags
===================
1
===================
(ON
)
0
ON
-----Q-- 00000100 0
---Q---- 00010000 1
------ Q- 00000010 2
Q------- 10000000 3
-------Q 00000001 4
-Q------ 01000000 5
---- Q--- 00001000 6
-- Q----- 00100000 7
===================
2
===================
1. : left
2. : down
3. : right
1 3 (0x08)
2 2 (0x04)
0 0x10 1
1 5 (0x20) 2 6 (0x40)
1
right left
rightleft1
*-------------
|. . . . . .
|. . . -3. . 0x02 -|
|. . -2. . . 0x04 |(1 bit right)
|. -1. . . . 0x08 -|
|Q . . . . . 0x10 (Q 4 down)
|. +1. . . . 0x20 -|
|. . +2. . . 0x40 |(1 bit left)
|. . . +3. . 0x80 -|
*-------------
nn+1
n(bit)OR
leftdownright
n+1
left :(left |bit)<<1
right:(right|bit)>>1
down : down|bit
===================
3
===================
n+1OR
ON
OR
ON
bitmap
bit=-bitmap & bitmap;//
00000011 3
00000010 2
00000001 1
00000000 0
11111111 -1
11111110 -2
11111101 -3
n-nn+1
n=22n-nANDn
ON
1
00010110 22
AND 11101010 -22
------------------
00000010
while bitmap ON
while(bitmap) {
bit=-bitmap & bitmap;
bitmap ^= bit;
//(bit)
}
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -r
CPUR
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.19
15: 2279184 0 1.24
16: 14772512 0 7.79
17: 95815104 0 57.57
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -c
CPU
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.21
15: 2279184 0 1.40
16: 14772512 0 8.78
17: 95815104 0 1:05.00
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -s
SGPU
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.02
13: 73712 0 00:00:00:00.03
14: 365596 0 00:00:00:00.08
15: 2279184 0 00:00:00:00.48
16: 14772512 0 00:00:00:02.41
17: 95815104 0 00:00:00:18.30
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -g
GPU
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.05
13: 73712 0 00:00:00:00.07
14: 365596 0 00:00:00:00.07
15: 2279184 0 00:00:00:00.37
16: 14772512 0 00:00:00:02.30
17: 95815104 0 00:00:00:18.07
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//
long TOTAL=0; //CPU,CPUR
long UNIQUE=0; //CPU,CPUR
typedef unsigned long long uint64;
typedef struct{
uint64 bv;
uint64 down;
uint64 left;
uint64 right;
int x[MAX];
int y[MAX];
}Board ;
//
Board b[2457600];
Board B;
// GPU
__global__ void cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond);
long long solve_nqueen_cuda(int size,int steps);
void NQueenG(int size,int mask,int row,int steps);
// SGPU
__global__ void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* results,int totalCond);
long long sgpu_solve_nqueen_cuda(int size,int steps);
// CPU
void TimeFormat(clock_t utime,char *form);
// CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r);
// CPUR
void NQueenR(int size,int mask,int row,uint64 bv,uint64 left,uint64 down,uint64 right);
//
//GPU kernel: each thread finishes one partial board prepared by the host
__global__
void cuda_kernel(
int size,
int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
int totalCond)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
//row: depth inside this sub-search, starting at 0; mask: the low `size` bits all set
//mask
//n=8 mask==2
register int row=0;
register unsigned int bit;
//
//
//
//ID
register unsigned const int tid=threadIdx.x;
//ID
register unsigned const int bid=blockIdx.x;
//ID
register unsigned const int idx=bid*blockDim.x+tid;
//
//
//
//shared
//10mask
//GPU10
//THREAD_NUM
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//bitmap of the columns still open in this row, computed from down, left and right
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
//
//
//only threads with idx<totalCond were given a partial board by the host; each loads its saved down/left/right state (stored at index idx) and finishes that sub-search
if(idx<totalCond){
//totalDown,totalLeft,totalRight
//down,left,right
//CPU t_steps
//
// idx
//
/**06 **********/
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
//bitmap[tid][row]
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
/***************************************/
//
//bitmap[tid][row]==0 means no free column in this row: back up one row
//1
/**06 **********/
//if(bitmap[tid][row]==0){
if(bitmap_tid_row==0){
/***************************************/
row--;
}else{
//
bitmap[tid][row]
^=bit
/**06 **********/
//=(-bitmap[tid][row]&bitmap[tid][row]);
=(-bitmap_tid_row&bitmap_tid_row);
/***************************************/
//
if((bit&mask)!=0){
//reached the last row handled by the GPU? then a complete solution was found
//
if(row+1==mark){
total++;
row--;
}else{
int rowP=row+1;
/**07register ****/
//down[tid][rowP]=down[tid][row]|bit;
down[tid][rowP]=down_tid_row|bit;
//left[tid][rowP]=(left[tid][row]|bit)<<1;
left[tid][rowP]=(left_tid_row|bit)<<1;
//right[tid][rowP]=(right[tid][row]|bit)>>1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//
row--;
}
}
}
//store this thread's solution count
sum[tid]=total;
}else{
//threads beyond totalCond had no work and contribute nothing
sum[tid]=0;
}
//block-wide tree reduction of sum[]; __syncthreads() separates the stages
//__syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
}
__syncthreads();if(tid<32){
sum[tid]+=sum[tid+32];
}
__syncthreads();if(tid<16){
sum[tid]+=sum[tid+16];
}
__syncthreads();if(tid<8){
sum[tid]+=sum[tid+8];
}
__syncthreads();if(tid<4){
sum[tid]+=sum[tid+4];
}
__syncthreads();if(tid<2){
sum[tid]+=sum[tid+2];
}
__syncthreads();if(tid<1){
sum[tid]+=sum[tid+1];
}
__syncthreads();if(tid==0){
d_results[bid]=sum[0];
}
}
//
// GPU solver driver: expands the top of the search tree on the host and hands batches of sub-searches to the kernel above
long solve_nqueen_cuda(int size,int mask,int row,int n_left,int n_down,int n_right,int steps)
{
//the host expands the first mark rows of the search; the GPU finishes the remaining size-mark rows (at most 10)
const unsigned int mark=size>11?size-10:2;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//bitmap stack for the host-side part of the search
//stack1
unsigned int bitmap[32];
//bitmap[row]=(left[row]|down[row]|right[row]);
/***06 bitGPU*********************/
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
/************************/
unsigned int bit;
//unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalDown;
hipHostMalloc((void**) &totalDown,sizeof(int)*steps);
//unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalLeft;
hipHostMalloc((void**) &totalLeft,sizeof(int)*steps);
//unsigned int* totalRight=new unsigned int[steps];
unsigned int* totalRight;
hipHostMalloc((void**) &totalRight,sizeof(int)*steps);
//unsigned int* h_results=new unsigned int[steps];
unsigned int* h_results;
hipHostMalloc((void**) &h_results,sizeof(int)*steps);
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
//the host walks the upper part of the search tree; whenever it reaches
//row==mark it stores the partial state (down,left,right) into
//totalDown,totalLeft,totalRight, and the GPU later finishes those
//sub-searches in batches of `steps` (e.g. for n=15 the host goes down to
//row 5, leaving at most 10 rows per sub-search for the GPU)
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]=00000000
//1
/***06 bit*********************/
//06GPU
if(bitmap[row]==0){ row--; }
/************************/
/***06 bit*********************/
//06SGPU
//if((bitmap[row]&mask)==mask){row--;}
/************************/
else{//
//06SGPU
/***06 bit*********************/
//bit=(bitmap[row]+1)&~bitmap[row];
//bitmap[row]|=bit;
/************************/
//06GPU
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); //
if((bit&mask)!=0){//
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
/***06 bit*********************/
//bitmap[rowP]=(down[rowP]|left[rowP]|right[rowP]);
/************************/
/***06 bit*********************/
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
/************************/
row++;
if(row==mark){
//the search has reached row==mark: record this partial state
//(down,left,right) at index totalCond so that one GPU thread can
//finish the corresponding sub-search later
//row=2(13n15row=5)
//totalDown,totalLeft,totalRight
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
//
totalCond++;
//once `steps` partial states have been collected, hand the whole
//batch to the GPU at once
//
//totalCond==steps
if(totalCond==steps){
//if a previous batch is still pending (matched==true), collect its results before reusing the device buffers
//matched=true
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
//launch the batch: totalCond sub-searches, each size-mark rows deep
hipLaunchKernelGGL(( cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
//a batch is now running on the GPU; its results are collected before
//the next launch (or after the loop)
//GPUGPUmatched=true
matched=true;
//start filling the next batch from index 0
//(stepsGPU)
totalCond=0;
}
//this partial state has been recorded; back up one row and keep
//enumerating further partial states
//totalDown,totalLeft,totalRight
row--;
}
}else{
//dead end on the host side: back up
//nqueen
row--;
}
}
}
//after the loop, collect the results of any batch still running on the GPU
//matched=true
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
//launch the final (possibly partial) batch: totalCond sub-searches, each size-mark rows deep
//steps
//totalCond
hipLaunchKernelGGL(( cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
//
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
/***06 cudaFreeHost**/
//delete[] totalDown;
hipHostFree(totalDown);
//delete[] totalLeft;
hipHostFree(totalLeft);
//delete[] totalRight;
hipHostFree(totalRight);
//delete[] h_results;
hipHostFree(h_results);
/************************/
return total;
}
//GPU driver: tries each left-half column for the first-row queen and uses mirror symmetry for the rest
void NQueenG(int size,int steps)
{
register int sizeE=size-1;
register int bit=0;
register int mask=((1<<size)-1);
if(size<=0||size>32){return;}
//
int lim=(size%2==0)?size/2:sizeE/2;
for(int col=0;col<lim;col++){
bit=(1<<col);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
//every left-half solution has a mirror image, so the count is doubled
TOTAL=TOTAL*2;
//
if(size%2==1){
bit=(1<<(sizeE)/2);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
}
//
//SGPU
__global__
void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond)
{
//
const int tid=threadIdx.x;//thread ID within the block
const int bid=blockIdx.x;//block ID
const int idx=bid*blockDim.x+tid;//global thread index
//
__shared__ unsigned int down[THREAD_NUM][10];//shared
__shared__ unsigned int left[THREAD_NUM][10];//THREAD_NUM
__shared__ unsigned int right[THREAD_NUM][10];//10maskGPU10
__shared__ unsigned int bitmap[THREAD_NUM][10];
__shared__ unsigned int sum[THREAD_NUM];
//
const unsigned int mask=(1<<size)-1;
int total=0;
int row=0;//row=01mask n=8 mask==2
unsigned int bit;
if(idx<totalCond){// GPUstepstotalCond
down[tid][row]=totalDown[idx];//totalDown,totalLeft,totalRightdown,left,right
left[tid][row]=totalLeft[idx];//CPU t_steps idx
right[tid][row]=totalRight[idx];
bitmap[tid][row]=down[tid][row]|left[tid][row]|right[tid][row];//down,left,rightbitmap
while(row>=0){
//
//06GPU
//if(bitmap[tid][row]==0){//bitmap[tid][row]=00000000 1
//06SGPU
if((bitmap[tid][row]&mask)==mask){//bitmap[tid][row]=00000000 1
//
row--;
}else{
//
//06GPU
//bitmap[tid][row]^=bit=(-bitmap[tid][row]&bitmap[tid][row]); //
//06SGPU
bit=(bitmap[tid][row]+1)&~bitmap[tid][row];
bitmap[tid][row]|=bit;
//
if((bit&mask)!=0){//
if(row+1==mark){//?
total++;
row--;
}
else{
down[tid][row+1]=down[tid][row]|bit;
left[tid][row+1]=(left[tid][row]|bit)<<1;
right[tid][row+1]=(right[tid][row]|bit)>>1;
bitmap[tid][row+1]=(down[tid][row+1]|left[tid][row+1]|right[tid][row+1]);
row++;
}
}else{//
row--;
}
}
}
sum[tid]=total;//sum[tid]
}else{//totalCond total
sum[tid]=0;
}
//block-wide tree reduction of the per-thread counts; __syncthreads() separates the stages
//__syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}//__syncthreads();1
__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
__syncthreads();if(tid==0){d_results[bid]=sum[0];}
}
//
//SGPU
long long sgpu_solve_nqueen_cuda(int size,int steps)
{
unsigned int down[32];
unsigned int left[32];
unsigned int right[32];
unsigned int bitmap[32];
unsigned int bit;
if(size<=0||size>32){return 0;}
unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalRight=new unsigned int[steps];
unsigned int* h_results=new unsigned int[steps];
//device
unsigned int* downCuda;
hipMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
hipMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
hipMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
const unsigned int mask=(1<<size)-1;
const unsigned int mark=size>11?size-10:2;
long long total=0;
int totalCond=0;
int row=0;
down[0]=0;
left[0]=0;
right[0]=0;
bitmap[0]=0;
bool matched=false;
for(int col=0;col<size/2;col++){
bit=(1<<col);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
}
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
total*=2;
if(size%2==1){
matched=false;
totalCond=0;
bit=(1<<(size-1)/2);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
if(matched){
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
hipMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,hipMemcpyHostToDevice);
/** backTrack+bitmap*/
hipLaunchKernelGGL(( sgpu_cuda_kernel), dim3(steps/THREAD_NUM),dim3(THREAD_NUM)
, 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
hipMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
}
hipFree(downCuda);
hipFree(leftCuda);
hipFree(rightCuda);
hipFree(resultsCuda);
delete[] totalDown;
delete[] totalLeft;
delete[] totalRight;
delete[] h_results;
return total;
}
//
//CUDA
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
hipSetDevice(i);
return true;
}
//
//hh:mm:ss.ms
void TimeFormat(clock_t utime,char *form){
int dd,hh,mm;
float ftime,ss;
ftime=(float)utime/CLOCKS_PER_SEC;
mm=(int)ftime/60;
ss=ftime-(int)(mm*60);
dd=mm/(24*60);
mm=mm%(24*60);
hh=mm/60;
mm=mm%60;
if(dd)
sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
else if(hh)
sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
else if(mm)
sprintf(form," %2d:%05.2f",mm,ss);
else
sprintf(form," %5.2f",ss);
}
//
bool board_placement(int si,int x,int y)
{
//
//printf("i:%d:x:%d:y:%d\n",i,B.x[i],B.y[i]);
if(B.x[x]==y){
//printf("Duplicate x:%d:y:%d\n",x,y);
////OK
return true;
}
B.x[x]=y;
//x y p.N-1-x+y x+y
uint64 bv=1<<x;
uint64 down=1<<y;
B.y[x]=B.y[x]+down;
uint64 left=1<<(si-1-x+y);
uint64 right=1<<(x+y);
//printf("check valid x:%d:y:%d:p.N-1-x+y:%d;x+y:%d\n",x,y,si-1-x+y,x+y);
//printf("check valid pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("bvcheck:%d:bhcheck:%d:bucheck:%d:bdcheck:%d\n",B.bv&bv,B.bh&bh,B.bu&bu,B.bd&bd);
if((B.bv&bv)||(B.down&down)||(B.left&left)||(B.right&right)){
//printf("valid_false\n");
return false;
}
//printf("before pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
B.bv|=bv;
B.down|=down;
B.left|=left;
B.right|=right;
//printf("after pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("valid_true\n");
return true;
}
//
//CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r){
int sizeE=size-1;
int n;
uint64 bitmap[size];
uint64 bv[size];
uint64 left[size];
uint64 down[size];
uint64 right[size];
uint64 bit=0;
bitmap[row]=mask&~(l|d|r);
bv[row]=b;
down[row]=d;
left[row]=l;
right[row]=r;
while(row>=2){
//printf("row:%d,bv:%d,left:%d,down:%d,right:%d\n",row,bv[row],left[row],down[row],right[row]);
while((bv[row]&1)!=0) {
n=row++;
bv[row]=bv[n]>>1;//
left[row]=left[n]<<1;//left
right[row]=right[n]>>1;//right
down[row]=down[n];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
bv[row+1]=bv[row]>>1;
if(bitmap[row]==0){
--row;
}else{
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0||row>=sizeE){
//if((bit)!=0){
if(row>=sizeE){
TOTAL++;
--row;
}else{
n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
//bitmap[row]=~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
//
//
//CPUR
void NQueenR(int size,uint64 mask, int row,uint64 bv,uint64 left,uint64 down,uint64 right){
uint64 bitmap=0;
uint64 bit=0;
//printf("row:%d,bv:%d,left:%d,down:%d,right:%d\n",row,bv,left,down,right);
//
while((bv&1)!=0) {
bv>>=1;//
left<<=1;//left
right>>=1;//right
row++;
}
bv>>=1;
if(row==size){
TOTAL++;
}else{
//bitmap=mask&~(left|down|right);//mask10
bitmap=~(left|down|right);
while(bitmap>0){
bit=(-bitmap&bitmap);
bitmap=(bitmap^bit);
NQueenR(size,mask,row+1,bv,(left|bit)<<1,down|bit,(right|bit)>>1);
}
}
}
//
long prepare(int size){
//enumerate the valid column pairs (a,b) for the queens on two adjacent border rows
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt=0; //running index into b[]; must start at zero
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=0;w<idx;w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
b[bcnt]=B;
bcnt++;
}
}
}
}
return bcnt;
}
//main method
int main(int argc,char** argv) {
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1,steps=24576;
/** parameter handling */
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
else
cpur=true;
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s]\n",argv[0]);
printf(" -c: CPU only\n");
printf(" -r: CPUR only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
/** output and execution */
if(cpu){
printf("\n\n6.CPU non-recursive backtracking + bitmap\n");
}else if(cpur){
printf("\n\n6.CPUR recursive backtracking + bitmap\n");
}else if(gpu){
printf("\n\n6.GPU non-recursive backtracking + bitmap\n");
}else if(sgpu){
printf("\n\n6.SGPU non-recursive backtracking + bitmap\n");
}
if(cpu||cpur){
printf("%s\n"," N: Total Unique hh:mm:ss.ms");
clock_t st; //for timing
char t[20]; //holds hh:mm:ss.ms
int min=5;
int targetN=15;
uint64 mask;
for(int i=min;i<=targetN;i++){
TOTAL=0;
UNIQUE=0;
mask=((1<<i)-1);
int size=i;
//preparation: place queens in the two outer rows and columns on all four sides
long bcnt=prepare(size);
//start timing after the preparation is done
st=clock();
for (long bc=0;bc<=bcnt;bc++){
B=b[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
TimeFormat(clock()-st,t);
printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);
}
}
if(gpu||sgpu){
if(!InitCUDA()){return 0;}
int min=4;int targetN=17;
struct timeval t0;struct timeval t1;
int ss;int ms;int dd;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); //start measurement
if(gpu){
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}else if(sgpu){
TOTAL=sgpu_solve_nqueen_cuda(i,steps);
UNIQUE=0;
}
gettimeofday(&t1,NULL); //end measurement
if(t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
| deefd56f32aea98a280edea57cae1d9da44a613f.cu | /**
 Algorithms and Data Structures with CUDA
 Optimizing the N-Queens problem step by step
 Kyodo News (general incorporated association), Information Technology Bureau, Iichiro Suzuki ([email protected])
 Compile and run
 $ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g|-s)
 -c: CPU
 -r: CPU recursive
 -g: GPU
 -s: SGPU (believed to be the Somers version)
 1. Brute force
 2. :
 3. Backtracking (arrays)
 4. :
 5. :
 6. Backtracking (bitmap)
 7. Mirror
 8. Symmetry elimination
 9. Branching on the queen's position, BOUND1
 10. Branching on the queen's position, BOUND1,2
 11. Pruning
 12. Optimization
 13. Parallel processing
 6. Backtracking + bit (n27)
 After queens have been placed in the two outer rows/columns on all four sides (bit (n27)), the remaining solutions are found by backtracking.
 Backtracking
 Flags record, for each column and each diagonal, whether a queen is already there; as soon as it
 becomes clear that a constraint cannot be satisfied, no further patterns are generated from that branch.
 Preparing these per-column and per-diagonal flags is what gives the speed-up.
 Up to this point we only enumerated combinations that do not repeat in the row and column directions,
 but since a queen also captures along diagonals, adding the restriction that at most one queen may sit
 on any diagonal lets the depth-first search turn back the moment it is known that descending further
 cannot yield a solution, without visiting every leaf.
 Bit (n27)
 Speed-up using bit operations: the state is packed into bitmaps and processed there.
 20 to 30 times faster than plain backtracking.
 With a bitmap, data can be moved quickly by shifting.
 With flag arrays moving the data costs O(N); with a bitmap it is O(1).
 Instead of preparing 2*N-1 elements for the diagonals as a flag array does, N bits are
 enough.
 Put the placeable bit pattern into flags and take the bits out one at a time with -flags & flags.
 20-30 times faster than backtracking.
 ===================
 Idea 1
 ===================
 The N x N chessboard is represented by N bit fields, with the state of one row
 mapped to one bit field (the bit at the position where a queen is placed is
 turned ON).
 Backtracking then proceeds from bit field 0 "downwards", turning ON exactly one
 bit position at each step.
 -----Q-- 00000100 bit field 0
 ---Q---- 00010000 bit field 1
 ------Q- 00000010 bit field 2
 Q------- 10000000 bit field 3
 -------Q 00000001 bit field 4
 -Q------ 01000000 bit field 5
 ----Q--- 00001000 bit field 6
 --Q----- 00100000 bit field 7
 ===================
 Idea 2
 ===================
 Next, three more bit fields are prepared to check the attack lines:
 1. attacks running towards the lower left: left
 2. attacks running straight down: down
 3. attacks running towards the lower right: right
 Now consider the diagonal attack lines.
 In the figure above,
 the up-right diagonal attack from column 1 is bit 3 (0x08),
 and from column 2 it is bit 2 (0x04).
 These values can be obtained by "right-shifting" the queen position of column 0,
 0x10, one bit at a time.
 For the up-left diagonal the attack is bit 5 (0x20) in column 1 and bit 6 (0x40) in column 2,
 so this time it can be obtained by "left-shifting" one bit at a time.
 In other words, by representing the right-shifted attacks as right and the left-shifted attacks
 as left, the queen's attack lines are obtained simply by shifting right and left by one bit.
 *-------------
 |. . . . . .
 |. . . -3. . 0x02 -|
 |. . -2. . . 0x04 |(1-bit right shift: right)
 |. -1. . . . 0x08 -|
 |Q . . . . . 0x10 <-(the position of Q is 4: down)
 |. +1. . . . 0x20 -|
 |. . +2. . . 0x40 |(1-bit left shift: left)
 |. . . +3. . 0x80 -|
 *-------------
 Figure: checking the diagonal attack lines
 When the search advances from bit field n to bit field n+1, each of these three
 bit fields is OR-ed with bit field n (bit). left is shifted one to the left, down
 stays as it is, and right is shifted one to the right before being handed to the
 search of bit field n+1.
 left :(left |bit)<<1
 right:(right|bit)>>1
 down : down|bit
 ===================
 Idea 3
 ===================
 When searching bit field n+1, a bit field is built by OR-ing these three bit fields;
 the positions where it is ON lie on an attack line, so no queen can be placed there.
 Next that OR-ed bit field is bit-inverted, i.e. converted into "a bit field in which
 the placeable bits are ON". Calling this placeable bit field bitmap, try the
 following operation:
 bit=-bitmap & bitmap;//extract the rightmost bit
 To understand what this expression means you need to know how negative values are
 represented in binary on a computer. Written out concretely in binary, negative
 values look like this:
 00000011 3
 00000010 2
 00000001 1
 00000000 0
 11111111 -1
 11111110 -2
 11111101 -3
 To turn a positive value n into the negative value -n, n is bit-inverted and then 1 is added.
 Taking n=22 as an example, AND-ing n with -n gives the result below: exactly one bit,
 the lowest ON bit of n written in binary, is extracted. The important point is that this
 1-bit extraction is achieved with an extremely simple operation.
 00010110 22
 AND 11101010 -22
 ------------------
 00000010
 Now, if you write a while loop like the one below, the loop iterates exactly as many
 times as there are ON bits in bitmap. The placeable patterns are generated one by one
 with no waste at all.
 while(bitmap) {
 bit=-bitmap & bitmap;
 bitmap ^= bit;
 //here the placeable patterns are generated one at a time (bit)
 }
 Execution results
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -r
 6.CPUR recursive backtracking + bitmap
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.19
15: 2279184 0 1.24
16: 14772512 0 7.79
17: 95815104 0 57.57
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -c
 6.CPU non-recursive backtracking + bitmap
N: Total Unique hh:mm:ss.ms
4: 2 0 0.00
5: 10 0 0.00
6: 4 0 0.00
7: 40 0 0.00
8: 92 0 0.00
9: 352 0 0.00
10: 724 0 0.00
11: 2680 0 0.00
12: 14200 0 0.01
13: 73712 0 0.04
14: 365596 0 0.21
15: 2279184 0 1.40
16: 14772512 0 8.78
17: 95815104 0 1:05.00
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -s
 6.SGPU non-recursive backtracking + bitmap
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.02
13: 73712 0 00:00:00:00.03
14: 365596 0 00:00:00:00.08
15: 2279184 0 00:00:00:00.48
16: 14772512 0 00:00:00:02.41
17: 95815104 0 00:00:00:18.30
$ nvcc -O3 CUDA06_N-Queen.cu && ./a.out -g
 6.GPU non-recursive backtracking + bitmap
N: Total Unique dd:hh:mm:ss.ms
4: 2 0 00:00:00:00.02
5: 10 0 00:00:00:00.00
6: 4 0 00:00:00:00.00
7: 40 0 00:00:00:00.00
8: 92 0 00:00:00:00.00
9: 352 0 00:00:00:00.00
10: 724 0 00:00:00:00.00
11: 2680 0 00:00:00:00.01
12: 14200 0 00:00:00:00.05
13: 73712 0 00:00:00:00.07
14: 365596 0 00:00:00:00.07
15: 2279184 0 00:00:00:00.37
16: 14772512 0 00:00:00:02.30
17: 95815104 0 00:00:00:18.07
*/
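// ---- Added illustration (not part of the original source) ----
// A minimal sketch of the lowest-set-bit extraction described in "Idea 3"
// above; the helper name is ours, and it assumes the same unsigned bitmap
// representation used by the kernels below.
static inline unsigned int pick_lowest_bit(unsigned int bitmap){
  //equivalent to bit=-bitmap&bitmap in the text: isolates the least significant ON bit (two's complement)
  return (0u-bitmap)&bitmap;
}
// e.g. pick_lowest_bit(0x16) /*00010110*/ returns 0x02 /*00000010*/, matching the worked example above.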
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//variable declarations
long TOTAL=0; //CPU,CPUR
long UNIQUE=0; //CPU,CPUR
typedef unsigned long long uint64;
typedef struct{
uint64 bv;
uint64 down;
uint64 left;
uint64 right;
int x[MAX];
int y[MAX];
}Board ;
//
Board b[2457600];
Board B;
//function declarations: GPU
__global__ void cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond);
long long solve_nqueen_cuda(int size,int steps);
void NQueenG(int size,int mask,int row,int steps);
//function declarations: SGPU
__global__ void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* results,int totalCond);
long long sgpu_solve_nqueen_cuda(int size,int steps);
//function declarations: CPU
void TimeFormat(clock_t utime,char *form);
//function declarations: CPU
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r);
//function declarations: CPUR
void NQueenR(int size,int mask,int row,uint64 bv,uint64 left,uint64 down,uint64 right);
//
//GPU
__global__
void cuda_kernel(
int size,
int mark,
unsigned int* totalDown,
unsigned int* totalLeft,
unsigned int* totalRight,
unsigned int* d_results,
int totalCond)
{
register const unsigned int mask=(1<<size)-1;
register unsigned int total=0;
//row starts at 0 here, but that does not mean we begin at row 1 of the board;
//we start from the mask row onward
//for n=8, mask==2, so we start from there
register int row=0;
register unsigned int bit;
//
//threads
//
//thread ID within the block
register unsigned const int tid=threadIdx.x;
//block ID within the grid
register unsigned const int bid=blockIdx.x;
//global ID across the whole grid
register unsigned const int idx=bid*blockDim.x+tid;
//
//shared memory
//
//uses shared memory, shared by the threads within a block
//the 10 is fixed because with the current mask setting
//the GPU executes at most 10 rows
//THREAD_NUM is the number of threads per block
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=totalDown[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=totalLeft[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=totalRight[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//derive bitmap from down,left,right
bitmap[tid][row]
=mask&~(
down[tid][row]
|left[tid][row]
|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
//
//idle threads are not run
//the GPU launches steps threads, but those at or beyond totalCond just spin empty
if(idx<totalCond){
//repack the totalDown,totalLeft,totalRight information
//into down,left,right
//the t_ arrays packed on the CPU have steps entries, but
//within a block we are limited to the number of threads per block,
//so idx is sufficient
//
/**06 replaced with scalar variables**********/
register unsigned int bitmap_tid_row;
register unsigned int down_tid_row;
register unsigned int left_tid_row;
register unsigned int right_tid_row;
while(row>=0){
//replace bitmap[tid][row] with a scalar variable
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
/***************************************/
//
//bitmap[tid][row]=00000000: the queen cannot
//be placed anywhere, so go back up one row
/**06 replaced with scalar variables**********/
//if(bitmap[tid][row]==0){
if(bitmap_tid_row==0){
/***************************************/
row--;
}else{
//place a queen
bitmap[tid][row]
^=bit
/**06 replaced with scalar variables**********/
//=(-bitmap[tid][row]&bitmap[tid][row]);
=(-bitmap_tid_row&bitmap_tid_row);
/***************************************/
//is there anywhere to place it?
if((bit&mask)!=0){
//last row? if we safely reached the row
//just before the last one, add to the count
if(row+1==mark){
total++;
row--;
}else{
int rowP=row+1;
/**07 replaced with scalar variables to use registers ****/
//down[tid][rowP]=down[tid][row]|bit;
down[tid][rowP]=down_tid_row|bit;
//left[tid][rowP]=(left[tid][row]|bit)<<1;
left[tid][rowP]=(left_tid_row|bit)<<1;
//right[tid][rowP]=(right[tid][row]|bit)>>1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//if there is nowhere to place it, go up one row
row--;
}
}
}
//finally store the count into sum[tid]
sum[tid]=total;
}else{
//threads outside totalCond just spin empty, so total is not accumulated
sum[tid]=0;
}
//__syncthreads() synchronizes the threads within the block
//it waits until every thread has reached __syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
}
__syncthreads();if(tid<32){
sum[tid]+=sum[tid+32];
}
__syncthreads();if(tid<16){
sum[tid]+=sum[tid+16];
}
__syncthreads();if(tid<8){
sum[tid]+=sum[tid+8];
}
__syncthreads();if(tid<4){
sum[tid]+=sum[tid+4];
}
__syncthreads();if(tid<2){
sum[tid]+=sum[tid+2];
}
__syncthreads();if(tid<1){
sum[tid]+=sum[tid+1];
}
__syncthreads();if(tid==0){
d_results[bid]=sum[0];
}
}
//
// GPU
long solve_nqueen_cuda(int size,int mask,int row,int n_left,int n_down,int n_right,int steps)
{
//from which row the GPU takes over; this setting can be changed, and the larger it is, the more runs concurrently on the GPU
const unsigned int mark=size>11?size-10:2;
const unsigned int h_mark=row;
long total=0;
int totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=n_down;
unsigned int right[32]; right[row]=n_right;
unsigned int left[32]; left[row]=n_left;
//by holding bitmap as an array
//we can go back one row without using a stack
unsigned int bitmap[32];
//bitmap[row]=(left[row]|down[row]|right[row]);
/***06 bit handling on the GPU*********************/
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
/************************/
unsigned int bit;
//unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalDown;
cudaMallocHost((void**) &totalDown,sizeof(int)*steps);
//unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalLeft;
cudaMallocHost((void**) &totalLeft,sizeof(int)*steps);
//unsigned int* totalRight=new unsigned int[steps];
unsigned int* totalRight;
cudaMallocHost((void**) &totalRight,sizeof(int)*steps);
//unsigned int* h_results=new unsigned int[steps];
unsigned int* h_results;
cudaMallocHost((void**) &h_results,sizeof(int)*steps);
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
//up to n=12 the CPU handles the first 3 rows -> while row<=mark the
//down,left,right information for those first 3 rows is stored into
//totalDown,totalLeft,totalRight
//-> from the 3rd row on, the GPU runs multithreaded and the results are collected
//from n=13 on, the number of rows executed on the CPU grows one by one
//for example with n=15 the CPU runs up to row=5,
//and the rest is done on the GPU (with the current settings the GPU
//executes at most 10 rows)
//while(row>=0) {
register int rowP=0;
while(row>=h_mark) {
//bitmap[row]=00000000: the queen cannot
//be placed anywhere, so go back up one row
/***06 bit handling changed*********************/
//06GPU this version performs better
if(bitmap[row]==0){ row--; }
/************************/
/***06 commented out due to the bit handling change*********************/
//06SGPU
//if((bitmap[row]&mask)==mask){row--;}
/************************/
else{//if there is somewhere to place a queen, advance
//06SGPU
/***06 commented out due to the bit handling change*********************/
//bit=(bitmap[row]+1)&~bitmap[row];
//bitmap[row]|=bit;
/************************/
//06GPU this version performs better
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); //place a queen
if((bit&mask)!=0){//if there is a place for it, move forward
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
/***06 commented out due to the bit handling change*********************/
//bitmap[rowP]=(down[rowP]|left[rowP]|right[rowP]);
/************************/
/***06 bit handling changed*********************/
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
/************************/
row++;
if(row==mark){
//queens are placed one by one on row mark (the 3rd row) and their
//down,left,right information is stored;
//we do not advance past that row. Once a queen has been placed on
//every possible square of that row, the GPU runs in parallel
//totalCond becomes the threadId; each thread is handed its down,left,right information
//the information for row=2 (this grows from n=13 on, e.g. row=5 for n=15)
//is stored into totalDown,totalLeft,totalRight
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
//increment the thread count
totalCond++;
//once the maximum GPU count is reached, run the GPU here; steps controls how many
//GPU threads run concurrently
//while n is small totalCond never exceeds steps, but as n grows
//it starts to exceed it.
//this branch is entered only when totalCond==steps
if(totalCond==steps){
//add to COUNT when matched==true //the counting happens inside the GPU,
//so matched is true once we come back from the GPU
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark is how many rows the GPU executes; totalCond is the number of threads
cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
//steps threads are launched, but only totalCond of them
//actually compute; the rest just spin empty
//the counting happens inside the GPU, so matched is true once we come back from the GPU
matched=true;
//after running the GPU via the totalCond==steps route, start the thread count
//again from 0 (this lets the GPU be launched steps threads at a time, over and over)
totalCond=0;
}
//after storing the information into totalDown,totalLeft,totalRight, go up one row
//repeating this places queens on every possible square of row=2 and
//stores their information into totalDown,totalLeft,totalRight
row--;
}
}else{
//if there is nowhere to place a queen, go back up; until row==mark is reached
//the CPU side just runs the ordinary nqueen search
row--;
}
}
}
//add to COUNT when matched==true //the counting happens inside the GPU,
//so matched is true once we come back from the GPU
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
//size-mark is how many rows the GPU executes; totalCond is the number of threads
//steps threads are launched, but only totalCond of them
//actually compute; the rest just spin empty
cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){
total+=h_results[col];
}
//
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
/***06 changed to cudaFreeHost**/
//delete[] totalDown;
cudaFreeHost(totalDown);
//delete[] totalLeft;
cudaFreeHost(totalLeft);
//delete[] totalRight;
cudaFreeHost(totalRight);
//delete[] h_results;
cudaFreeHost(h_results);
/************************/
return total;
}
//GPU
void NQueenG(int size,int steps)
{
register int sizeE=size-1;
register int bit=0;
register int mask=((1<<size)-1);
if(size<=0||size>32){return;}
//common to even and odd N: place queens only in the right half
int lim=(size%2==0)?size/2:sizeE/2;
for(int col=0;col<lim;col++){
bit=(1<<col);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
//mirror symmetry, so double TOTAL
TOTAL=TOTAL*2;
//for odd N, additionally place a queen in the centre column
if(size%2==1){
bit=(1<<(sizeE)/2);
TOTAL+=solve_nqueen_cuda(size,mask,1,bit<<1,bit,bit>>1,steps);
}
}
//
//SGPU
__global__
void sgpu_cuda_kernel(int size,int mark,unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,unsigned int* d_results,int totalCond)
{
//threads
const int tid=threadIdx.x;//thread ID within the block
const int bid=blockIdx.x;//block ID within the grid
const int idx=bid*blockDim.x+tid;//global ID across the whole grid
//shared memory
__shared__ unsigned int down[THREAD_NUM][10];//uses shared memory, shared by the threads within a block
__shared__ unsigned int left[THREAD_NUM][10];//THREAD_NUM is the number of threads per block
__shared__ unsigned int right[THREAD_NUM][10];//fixed at 10 because with the current mask setting the GPU executes at most 10 rows
__shared__ unsigned int bitmap[THREAD_NUM][10];
__shared__ unsigned int sum[THREAD_NUM];
//
const unsigned int mask=(1<<size)-1;
int total=0;
int row=0;//row starts at 0 here, but that does not mean row 1 of the board; we start from the mask row onward (for n=8, mask==2, so we start from there)
unsigned int bit;
if(idx<totalCond){//idle threads are not run; the GPU launches steps threads, but those at or beyond totalCond just spin empty
down[tid][row]=totalDown[idx];//repack the totalDown,totalLeft,totalRight information into down,left,right
left[tid][row]=totalLeft[idx];//the t_ arrays packed on the CPU have steps entries, but within a block we are limited to the number of threads per block, so idx is sufficient
right[tid][row]=totalRight[idx];
bitmap[tid][row]=down[tid][row]|left[tid][row]|right[tid][row];//derive bitmap from down,left,right
while(row>=0){
//
//the GPU version of 06
//if(bitmap[tid][row]==0){//bitmap[tid][row]=00000000: the queen cannot be placed anywhere, so go back up one row
//the SGPU version of 06
if((bitmap[tid][row]&mask)==mask){//bitmap[tid][row]=00000000: the queen cannot be placed anywhere, so go back up one row
//
row--;
}else{
//
//06GPU
//bitmap[tid][row]^=bit=(-bitmap[tid][row]&bitmap[tid][row]); //place a queen
//06SGPU
bit=(bitmap[tid][row]+1)&~bitmap[tid][row];
bitmap[tid][row]|=bit;
//
if((bit&mask)!=0){//is there anywhere to place it?
if(row+1==mark){//last row? if we safely reached the row just before the last one, add to the count
total++;
row--;
}
else{
down[tid][row+1]=down[tid][row]|bit;
left[tid][row+1]=(left[tid][row]|bit)<<1;
right[tid][row+1]=(right[tid][row]|bit)>>1;
bitmap[tid][row+1]=(down[tid][row+1]|left[tid][row+1]|right[tid][row+1]);
row++;
}
}else{//if there is nowhere to place it, go up one row
row--;
}
}
}
sum[tid]=total;//finally store the count into sum[tid]
}else{//threads outside totalCond just spin empty, so of course total is not accumulated
sum[tid]=0;
}
//__syncthreads() synchronizes the threads within the block.
//synchronizing means waiting until every thread has reached __syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}//multiple __syncthreads() are needed; with only one the count came out wrong
__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
__syncthreads();if(tid==0){d_results[bid]=sum[0];}
}
//
//SGPU
long long sgpu_solve_nqueen_cuda(int size,int steps)
{
unsigned int down[32];
unsigned int left[32];
unsigned int right[32];
unsigned int bitmap[32];
unsigned int bit;
if(size<=0||size>32){return 0;}
unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalRight=new unsigned int[steps];
unsigned int* h_results=new unsigned int[steps];
//device
unsigned int* downCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
unsigned int* leftCuda;
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
unsigned int* rightCuda;
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
unsigned int* resultsCuda;
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
const unsigned int mask=(1<<size)-1;
const unsigned int mark=size>11?size-10:2;
long long total=0;
int totalCond=0;
int row=0;
down[0]=0;
left[0]=0;
right[0]=0;
bitmap[0]=0;
bool matched=false;
for(int col=0;col<size/2;col++){
bit=(1<<col);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
}
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
total*=2;
if(size%2==1){
matched=false;
totalCond=0;
bit=(1<<(size-1)/2);
bitmap[0]|=bit;
down[1]=bit;
left[1]=bit<<1;
right[1]=bit>>1;
bitmap[1]=(down[1]|left[1]|right[1]);
row=1;
while(row>0){
if((bitmap[row]&mask)==mask){row--;}
else{
bit=(bitmap[row]+1)&~bitmap[row];
bitmap[row]|=bit;
if((bit&mask)!=0){
down[row+1]=down[row]|bit;
left[row+1]=(left[row]|bit)<<1;
right[row+1]=(right[row]|bit)>>1;
bitmap[row+1]=(down[row+1]|left[row+1]|right[row+1]);
row++;
if(row==mark){
totalDown[totalCond]=down[row];
totalLeft[totalCond]=left[row];
totalRight[totalCond]=right[row];
totalCond++;
if(totalCond==steps){
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
matched=true;
totalCond=0;
}
row--;
}
}else{row--;}
}
}
if(matched){
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
matched=false;
}
cudaMemcpy(downCuda,totalDown,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,
sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
sgpu_cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(h_results,resultsCuda,
sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<steps/THREAD_NUM;col++){total+=h_results[col];}
}
cudaFree(downCuda);
cudaFree(leftCuda);
cudaFree(rightCuda);
cudaFree(resultsCuda);
delete[] totalDown;
delete[] totalLeft;
delete[] totalRight;
delete[] h_results;
return total;
}
//
//CUDA initialization
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
cudaSetDevice(i);
return true;
}
//
//print the elapsed time in hh:mm:ss.ms format
void TimeFormat(clock_t utime,char *form){
int dd,hh,mm;
float ftime,ss;
ftime=(float)utime/CLOCKS_PER_SEC;
mm=(int)ftime/60;
ss=ftime-(int)(mm*60);
dd=mm/(24*60);
mm=mm%(24*60);
hh=mm/60;
mm=mm%60;
if(dd)
sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
else if(hh)
sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
else if(mm)
sprintf(form," %2d:%05.2f",mm,ss);
else
sprintf(form," %5.2f",ss);
}
//
bool board_placement(int si,int x,int y)
{
//check whether we are placing on the same square
//printf("i:%d:x:%d:y:%d\n",i,B.x[i],B.y[i]);
if(B.x[x]==y){
//printf("Duplicate x:%d:y:%d\n",x,y);
////placing on the same square is OK
return true;
}
B.x[x]=y;
//x is the row, y is the column; p.N-1-x+y runs from upper right to lower left, x+y from upper left to lower right
uint64 bv=1<<x;
uint64 down=1<<y;
B.y[x]=B.y[x]+down;
uint64 left=1<<(si-1-x+y);
uint64 right=1<<(x+y);
//printf("check valid x:%d:y:%d:p.N-1-x+y:%d;x+y:%d\n",x,y,si-1-x+y,x+y);
//printf("check valid pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("bvcheck:%d:bhcheck:%d:bucheck:%d:bdcheck:%d\n",B.bv&bv,B.bh&bh,B.bu&bu,B.bd&bd);
if((B.bv&bv)||(B.down&down)||(B.left&left)||(B.right&right)){
//printf("valid_false\n");
return false;
}
//printf("before pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
B.bv|=bv;
B.down|=down;
B.left|=left;
B.right|=right;
//printf("after pbv:%d:bv:%d:pbh:%d:bh:%d:pbu:%d:bu:%d:pbd:%d:bd:%d\n",B.bv,bv,B.bh,bh,B.bu,bu,B.bd,bd);
//printf("valid_true\n");
return true;
}
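// ---- Added illustration (not part of the original source) ----
// A small sketch of the diagonal indexing used in board_placement() above,
// under the same convention: (si-1-x+y) numbers the diagonals running from
// upper right to lower left and (x+y) those from upper left to lower right.
// The helper name is ours, not the original author's.
static inline void diagonal_bits_demo(int si,int x,int y,uint64 *l,uint64 *r){
  *l=((uint64)1)<<(si-1-x+y); //bit for the diagonal through square (x,y), upper right to lower left
  *r=((uint64)1)<<(x+y); //bit for the diagonal through square (x,y), upper left to lower right
}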
//
//CPU non-recursive logic method
void NQueen(int size,int mask,int row,uint64 b,uint64 l,uint64 d,uint64 r){
int sizeE=size-1;
int n;
uint64 bitmap[size];
uint64 bv[size];
uint64 left[size];
uint64 down[size];
uint64 right[size];
uint64 bit=0;
bitmap[row]=mask&~(l|d|r);
bv[row]=b;
down[row]=d;
left[row]=l;
right[row]=r;
while(row>=2){
//printf("row:%d,bv:%d,left:%d,down:%d,right:%d\n",row,bv[row],left[row],down[row],right[row]);
while((bv[row]&1)!=0) {
n=row++;
bv[row]=bv[n]>>1;//shift right by 1 bit
left[row]=left[n]<<1;//left: shift left by 1 bit
right[row]=right[n]>>1;//right: shift right by 1 bit
down[row]=down[n];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
bv[row+1]=bv[row]>>1;
if(bitmap[row]==0){
--row;
}else{
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0||row>=sizeE){
//if((bit)!=0){
if(row>=sizeE){
TOTAL++;
--row;
}else{
n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=down[n]|bit;
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
//bitmap[row]=~(left[row]|down[row]|right[row]);
}
}else{
--row;
}
}
}
}
//
//
//CPUR recursive logic method
void NQueenR(int size,uint64 mask, int row,uint64 bv,uint64 left,uint64 down,uint64 right){
uint64 bitmap=0;
uint64 bit=0;
//printf("row:%d,bv:%d,left:%d,down:%d,right:%d\n",row,bv,left,down,right);
//skip rows where a queen has already been placed
while((bv&1)!=0) {
bv>>=1;//shift right by 1 bit
left<<=1;//left: shift left by 1 bit
right>>=1;//right: shift right by 1 bit
row++;
}
bv>>=1;
if(row==size){
TOTAL++;
}else{
//bitmap=mask&~(left|down|right);//removed the mask because with it no counts came out from N=10 on
bitmap=~(left|down|right);
while(bitmap>0){
bit=(-bitmap&bitmap);
bitmap=(bitmap^bit);
NQueenR(size,mask,row+1,bv,(left|bit)<<1,down|bit,(right|bit)>>1);
}
}
}
//
long prepare(int size){
//CPUR
int pres_a[930];
int pres_b[930];
int idx=0;
long bcnt;
for(int a=0;a<size;a++){
for(int b=0;b<size;b++){
if((a>=b&&(a-b)<=1)||(b>a&&(b-a)<=1)){
continue;
}
pres_a[idx]=a;
pres_b[idx]=b;
idx++;
}
}
Board wB=B;
for(int w=0;w<idx;w++){
B=wB;
B.bv=B.down=B.left=B.right=0;
for(int j=0;j<size;j++){
B.x[j]=-1;
}
board_placement(size,0,pres_a[w]);
board_placement(size,1,pres_b[w]);
Board nB=B;
//int lsize=(size-2)*(size-1)-w;
//for(int n=w;n<lsize;n++){
for(int n=0;n<idx;n++){
B=nB;
if(board_placement(size,pres_a[n],size-1)==false){
continue;
}
if(board_placement(size,pres_b[n],size-2)==false){
continue;
}
Board eB=B;
//for(int e=w;e<lsize;e++){
for(int e=0;e<idx;e++){
B=eB;
if(board_placement(size,size-1,size-1-pres_a[e])==false){
continue;
}
if(board_placement(size,size-2,size-1-pres_b[e])==false){
continue;
}
Board sB=B;
//for(int s=w;s<lsize;s++){
for(int s=0;s<idx;s++){
B=sB;
if(board_placement(size,size-1-pres_a[s],0)==false){
continue;
}
if(board_placement(size,size-1-pres_b[s],1)==false){
continue;
}
b[bcnt]=B;
bcnt++;
}
}
}
}
return bcnt;
}
//main method
int main(int argc,char** argv) {
bool cpu=false,cpur=false,gpu=false,sgpu=false;
int argstart=1,steps=24576;
/** parameter handling */
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;}
else
cpur=true;
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s]\n",argv[0]);
printf(" -c: CPU only\n");
printf(" -r: CPUR only\n");
printf(" -g: GPU only\n");
printf(" -s: SGPU only\n");
printf("Default to 8 queen\n");
}
/** output and execution */
if(cpu){
printf("\n\n6.CPU non-recursive backtracking + bitmap\n");
}else if(cpur){
printf("\n\n6.CPUR recursive backtracking + bitmap\n");
}else if(gpu){
printf("\n\n6.GPU non-recursive backtracking + bitmap\n");
}else if(sgpu){
printf("\n\n6.SGPU non-recursive backtracking + bitmap\n");
}
if(cpu||cpur){
printf("%s\n"," N: Total Unique hh:mm:ss.ms");
clock_t st; //for timing
char t[20]; //holds hh:mm:ss.ms
int min=5;
int targetN=15;
uint64 mask;
for(int i=min;i<=targetN;i++){
TOTAL=0;
UNIQUE=0;
mask=((1<<i)-1);
int size=i;
//preparation: place queens in the two outer rows and columns on all four sides
long bcnt=prepare(size);
//start timing after the preparation is done
st=clock();
for (long bc=0;bc<=bcnt;bc++){
B=b[bc];
if(cpur){
//CPUR
NQueenR(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}else if(cpu){
//CPU
NQueen(i,mask,2,B.bv >> 2,
B.left>>4,
((((B.down>>2)|(~0<<(size-4)))+1)<<(size-5))-1,
(B.right>>4)<<(size-5));
}
}
TimeFormat(clock()-st,t);
printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);
}
}
if(gpu||sgpu){
if(!InitCUDA()){return 0;}
int min=4;int targetN=17;
struct timeval t0;struct timeval t1;
int ss;int ms;int dd;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); // start measurement
if(gpu){
TOTAL=0;
UNIQUE=0;
NQueenG(i,steps);
}else if(sgpu){
TOTAL=sgpu_solve_nqueen_cuda(i,steps);
UNIQUE=0;
}
gettimeofday(&t1,NULL); // end measurement
if(t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
|
a2767a32c15597222a07f84d35b4e05e1319f687.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xpoti_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ali Charara
* @date 2017-11-13
**/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "rocblas.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "operators.h"
#include "defs.h"
#include "kblas_common.h"
#include "batch_common.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xpoti_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A: host pointer to device buffer
int Xpoti_batch_offset(kblasHandle_t handle,
char uplo,
const int n,
TYPE** A, int A_row_off, int A_col_off, int lda,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
poti_batch_wsquery_core<false>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xpoti_batch_core<TYPE, TYPE**, false>(
handle,
uplo, n,
(TYPE**)A, A_row_off, A_col_off, lda, (long)0,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_poti_batch(kblasHandle_t handle,
char uplo,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXpoti_batch(kblasHandle_t handle,
char uplo,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda,
batchCount,
info_array);
}
//==============================================================================================
//Strided form
// template<>
// workspace needed: device pointers
// A: host pointer to device buffer
int Xpoti_batch_offset(kblasHandle_t handle,
char uplo,
const int n,
TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
poti_batch_wsquery_core<true>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xpoti_batch_core<TYPE, TYPE*, true>(
handle,
uplo, n,
(TYPE*)A, A_row_off, A_col_off, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_poti_batch(kblasHandle_t handle,
char uplo,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXpoti_batch_strided(kblasHandle_t handle,
char uplo,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
| a2767a32c15597222a07f84d35b4e05e1319f687.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xpoti_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ali Charara
* @date 2017-11-13
**/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "operators.h"
#include "defs.h"
#include "kblas_common.h"
#include "batch_common.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xpoti_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A: host pointer to device buffer
int Xpoti_batch_offset(kblasHandle_t handle,
char uplo,
const int n,
TYPE** A, int A_row_off, int A_col_off, int lda,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
poti_batch_wsquery_core<false>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xpoti_batch_core<TYPE, TYPE**, false>(
handle,
uplo, n,
(TYPE**)A, A_row_off, A_col_off, lda, (long)0,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_poti_batch(kblasHandle_t handle,
char uplo,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXpoti_batch(kblasHandle_t handle,
char uplo,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda,
batchCount,
info_array);
}
//==============================================================================================
//Strided form
// template<>
// workspace needed: device pointers
// A: host pointer to device buffer
int Xpoti_batch_offset(kblasHandle_t handle,
char uplo,
const int n,
TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
poti_batch_wsquery_core<true>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xpoti_batch_core<TYPE, TYPE*, true>(
handle,
uplo, n,
(TYPE*)A, A_row_off, A_col_off, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_poti_batch(kblasHandle_t handle,
char uplo,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXpoti_batch_strided(kblasHandle_t handle,
char uplo,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xpoti_batch_offset( handle,
uplo, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
|
77f0730a8d5d16a97446ea368c93c75b6f2ddbd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include <device_launch_parameters.h>
#include<stdio.h>
#include<iostream>
__global__ void checkIndex(void) {
printf("threadIdx:(%d,%d,%d) blockIdx:(%d,%d,%d) blockDim:(%d,%d,%d) gridDim:(%d,%d,%d)\n",
threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv) {
//total number of data elements
int nElem = 6;
//grid and block structure
dim3 block(3);
dim3 grid((nElem+block.x-1)/block.x);
//check the grid and block sizes from the host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
//check the grid and block sizes from the device side
hipLaunchKernelGGL(( checkIndex), dim3(grid), dim3(block), 0, 0, );
//reset the device
hipDeviceReset();
return 0;
} | 77f0730a8d5d16a97446ea368c93c75b6f2ddbd8.cu | #include<cuda_runtime.h>
#include <device_launch_parameters.h>
#include<stdio.h>
#include<iostream>
__global__ void checkIndex(void) {
printf("threadIdx:(%d,%d,%d) blockIdx:(%d,%d,%d) blockDim:(%d,%d,%d) gridDim:(%d,%d,%d)\n",
threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
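// ---- Added illustration (not part of the original source) ----
// The grid size below is the usual ceiling division (nElem+block.x-1)/block.x:
// with nElem=6 and block.x=3 it gives 2 blocks; nElem=7 would give 3.
// A minimal helper expressing the same idea (the name is ours):
static inline unsigned int div_up(unsigned int n, unsigned int d) {
    return (n + d - 1) / d; // smallest integer >= n/d, assuming d > 0
}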
int main(int argc, char **argv) {
//total number of data elements
int nElem = 6;
//grid and block structure
dim3 block(3);
dim3 grid((nElem+block.x-1)/block.x);
//check the grid and block sizes from the host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
//check the grid and block sizes from the device side
checkIndex<<<grid, block>>>();
//reset the device
cudaDeviceReset();
return 0;
} |
895951c773a2cfb049ebc9d78c53e7cbbb37b7c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file cuda_filter.cu
* @yzheng
* @CUDA mosaic filter
* @version 0.1
* @date 2019-05-07
*
* @copyright Copyright (c) 2019, yzheng
*
*/
#include <stdlib.h>
#include "filter.h"
#include "cuda_filter.h"
#define WARP_SIZE 32
#define MPR 32 // mosaic cells per row in one block
#define MPR_SQUARE (MPR * MPR)
#define MAX_TPB 256 // max thread per block
#define MAX_TPB_SQUARE (MAX_TPB * MAX_TPB)
#define DELTA(X) ((X) / MAX_TPB) // c / MAX_TPB, to get number of z axis
#define DELTA_SQUARE(X) ((X) * (X) / MAX_TPB_SQUARE)
/**
* @Memory check
*
* @param msg
*/
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* @Convert pixel array to cuda vector
*
* @param dest_pixels
* @param src_pixels
* @param cols
* @param rows
*/
void pixel_to_image(uchar3 *dest_pixels, pixel *src_pixels, int cols, int rows) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
int index = j * rows + i;
int offset = i * cols + j;
dest_pixels[index].x = src_pixels[offset].r;
dest_pixels[index].y = src_pixels[offset].g;
dest_pixels[index].z = src_pixels[offset].b;
}
}
}
/**
* @Convert cuda vector to pixel array
*
* @param dest_pixels
* @param src_pixels
* @param cols
* @param rows
*/
void image_to_pixel(pixel *dest_pixels, uchar3 *src_pixels, int cols, int rows) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
int index = j * rows + i;
int offset = i * cols + j;
dest_pixels[offset].r = src_pixels[index].x;
dest_pixels[offset].g = src_pixels[index].y;
dest_pixels[offset].b = src_pixels[index].z;
}
}
}
/**
* @Mosaic filter
*
* @param pixels_o
* @param pixels_i
* @param cols
* @param rows
* @param c
*/
void mosaic_transform_cuda(pixel *pixels_o, pixel *pixels_i, int cols, int rows, int c) {
// checking c
if (!(c & (c - 1)) == 0) {
fprintf(stderr, "Error: C should be the power of two in CUDA implementation.\n");
exit(1);
}
int cMPR = c * MPR; // for computing the grid size in third implementation
int area = cols * rows;
int grid_cols = (cols % c) ? cols / c + 1 : cols / c; // deal with partial mosaic
int grid_rows = (rows % c) ? rows / c + 1 : rows / c;
int shared_size = c * sizeof(float3); // the size of shared variable
int image_size = area * sizeof(uchar3);
int average_size = sizeof(float3);
int temp_size = DELTA_SQUARE(c) * sizeof(float3); // the temp array used when c > MAX_TPB
float ms;
uchar3 *image_i, *image_o, *d_image_i, *d_image_o;
float3 *average, *d_average, *d_average_temp;
hipEvent_t start, stop;
// modify the size of the shared variable depends on the size
if (c <= 4 && rows / cMPR > 1 && cols / cMPR > 1) {
shared_size = MPR_SQUARE * sizeof(float3);
grid_cols = (cols % cMPR) ? cols / cMPR + 1 : cols / cMPR;
grid_rows = (rows % cMPR) ? rows / cMPR + 1 : rows / cMPR;
}
if (c > MAX_TPB) {
shared_size = MAX_TPB * sizeof(float3);
}
// create timers
hipEventCreate(&start); hipEventCreate(&stop);
// memory allocation on host
hipHostMalloc((void **)&image_i, image_size);
hipHostMalloc((void **)&image_o, image_size);
hipHostMalloc((void **)&average, average_size);
// memory allocation on device
hipMalloc((void **)&d_image_i, image_size);
hipMalloc((void **)&d_image_o, image_size);
hipMalloc((void **)&d_average, average_size);
hipMalloc((void **)&d_average_temp, temp_size);
checkCUDAError("CUDA malloc");
// convert pixel array to cuda vector
pixel_to_image(image_i, pixels_i, cols, rows);
// copy image from host to device
hipMemcpy(d_image_i, image_i, image_size, hipMemcpyHostToDevice);
checkCUDAError("CUDA memcpy");
hipEventRecord(start);
// implementation type from top to bottom:
// 32 x 32 mosaic cells per block
// c thread per block with z axis
// c thread per block without z axis
if (c <= 4 && rows / cMPR > 1 && cols / cMPR > 1) {
dim3 blocksPerGrid(grid_rows, grid_cols);
dim3 threadsPerBlock(MPR, MPR);
hipLaunchKernelGGL(( mosaic_filter_ccmpb), dim3(blocksPerGrid), dim3(threadsPerBlock), shared_size, 0, d_image_i, d_image_o, d_average, cols, rows, c);
} else if (c > MAX_TPB) {
dim3 blocksPerGrid(grid_rows, grid_cols, DELTA_SQUARE(c));
dim3 threadsPerBlock(MAX_TPB, 1);
hipLaunchKernelGGL(( mosaic_filter_cpb_z), dim3(blocksPerGrid), dim3(threadsPerBlock), shared_size, 0, d_image_i, d_average_temp, cols, rows, c);
hipLaunchKernelGGL(( mosaic_out_cpb_z), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_average_temp, d_image_o, d_average, cols, rows, c);
} else {
dim3 blocksPerGrid(grid_rows, grid_cols);
dim3 threadsPerBlock(c, 1);
hipLaunchKernelGGL(( mosaic_filter_cpb), dim3(blocksPerGrid), dim3(threadsPerBlock), shared_size, 0, d_image_i, d_image_o, d_average, cols, rows);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
checkCUDAError("CUDA kernel");
// copy output image and thread_average values from device to host
hipMemcpy(image_o, d_image_o, image_size, hipMemcpyDeviceToHost);
hipMemcpy(average, d_average, average_size, hipMemcpyDeviceToHost);
checkCUDAError("CUDA memcpy");
// convert cuda vector to pixel array
image_to_pixel(pixels_o, image_o, cols, rows);
pixel average_color = {
(unsigned char)(average->x / area),
(unsigned char)(average->y / area),
(unsigned char)(average->z / area)
};
// clean up
hipHostFree(image_o); hipHostFree(image_i); hipHostFree(average);
hipFree(d_image_i); hipFree(d_image_o); hipFree(d_average); hipFree(d_average_temp);
hipEventDestroy(start); hipEventDestroy(stop);
printf("CUDA mode execution time took %.3lf ms\n", ms);
printf("CUDA Average image colour red = %d, green = %d, blue = %d\n", average_color.r, average_color.g, average_color.b);
}
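/**
 * ---- Added usage sketch (not part of the original source) ----
 * A hypothetical call site for mosaic_transform_cuda() above. It assumes the
 * caller has already loaded an image into a `pixel` array via the helpers in
 * filter.h (not shown here), and that c is a power of two, as checked above;
 * the function and variable names below are ours, not the original author's.
 */
void mosaic_demo(pixel *input, pixel *output, int cols, int rows) {
    int c = 16; // example mosaic cell size in pixels (must be a power of two)
    mosaic_transform_cuda(output, input, cols, rows, c);
}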
// -------------------------------- c threads per block --------------------------------------------
/**
* @Mosaic filter
*
* @param image
* @param image_output
* @param average
* @param cols
* @param rows
*/
__global__ void mosaic_filter_cpb(uchar3 *image, uchar3 *image_output, float3 *average, int cols, int rows) {
extern __shared__ float3 s_average[]; // sum of rgb values in one block/mosaic cell
int c = blockDim.x;
int x = threadIdx.x + blockIdx.x * c;
int y = threadIdx.y + blockIdx.y * c;
int m_area; // size of one mosaic cell
int mod_cols = cols % c;
int mod_rows = rows % c;
float3 thread_average = make_float3(0, 0, 0); // sum of one row in one block/mosaic cell
// calculate size for each mosaic cell depends on position (cope with partial mosaic)
mod_cols = (y < cols - mod_cols) ? c : mod_cols;
mod_rows = (x < rows - mod_rows) ? c : mod_rows;
m_area = mod_rows * mod_cols;
// traverse over one row of one mosaic cell
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
thread_average.x += image[offset].x;
thread_average.y += image[offset].y;
thread_average.z += image[offset].z;
}
}
// assign the sum of one row to shared variable
// perform reduction to get the sum for one block/mosaic cell
s_average[threadIdx.x] = thread_average;
__syncthreads();
// if c < 32, using warp level reduction, otherwise using the normal one
if (c <= WARP_SIZE) {
for (int stride = c / 2; stride != 0; stride >>= 1) {
s_average[threadIdx.x].x += __shfl_down_sync(0xffffffff, s_average[threadIdx.x].x, stride);
s_average[threadIdx.x].y += __shfl_down_sync(0xffffffff, s_average[threadIdx.x].y, stride);
s_average[threadIdx.x].z += __shfl_down_sync(0xffffffff, s_average[threadIdx.x].z, stride);
}
} else {
for (int stride = c / 2; stride != 0; stride >>= 1) {
if (threadIdx.x < stride) {
s_average[threadIdx.x].x += s_average[threadIdx.x + stride].x;
s_average[threadIdx.x].y += s_average[threadIdx.x + stride].y;
s_average[threadIdx.x].z += s_average[threadIdx.x + stride].z;
}
__syncthreads();
}
}
// atomic add to the sum of the entire image
// averaging operation is in the host function
if (threadIdx.x == 0 && threadIdx.y == 0) {
atomicAdd(&(average->x), s_average->x);
atomicAdd(&(average->y), s_average->y);
atomicAdd(&(average->z), s_average->z);
}
// assign the rgb value to the output image
thread_average.x = s_average->x / m_area;
thread_average.y = s_average->y / m_area;
thread_average.z = s_average->z / m_area;
// assign back to the output image
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
image_output[offset].x = (unsigned char)thread_average.x;
image_output[offset].y = (unsigned char)thread_average.y;
image_output[offset].z = (unsigned char)thread_average.z;
}
}
}
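/**
 * ---- Added illustration (not part of the original source) ----
 * The same warp-shuffle pattern used in the reduction above, reduced to a
 * single float for clarity. It assumes a full 32-lane warp and that only
 * lane 0 needs the result; the helper name is ours, not part of the filter.
 */
__inline__ __device__ float warp_reduce_sum_sketch(float v) {
    for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
        v += __shfl_down_sync(0xffffffff, v, offset); // add the value held by lane (lane + offset)
    }
    return v; // lane 0 now holds the sum over the warp
}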
/**
* @Mosaic filter with z axis
*
* @param image
* @param image_output
* @param temp_average
* @param cols
* @param rows
* @param c
*/
__global__ void mosaic_filter_cpb_z(uchar3 *image, float3 *temp_average, int cols, int rows, int c) {
// almost the same as the above function
extern __shared__ float3 s_average[];
int delta = DELTA(c); // used to mapping the index below
int x = threadIdx.x + blockIdx.x * c + (blockIdx.z / delta) * MAX_TPB;
int y = threadIdx.y + blockIdx.y * c + (blockIdx.z % delta) * MAX_TPB;
int ibx = blockIdx.x + blockIdx.y * gridDim.x;
int mod_cols = cols % MAX_TPB;
float3 thread_average = make_float3(0, 0, 0);
mod_cols = (y < cols - mod_cols) ? MAX_TPB : mod_cols;
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
thread_average.x += image[offset].x;
thread_average.y += image[offset].y;
thread_average.z += image[offset].z;
}
}
s_average[threadIdx.x] = thread_average;
__syncthreads();
// the initialization of stride will use max thread per block
for (int stride = MAX_TPB / 2; stride != 0; stride >>= 1) {
if (threadIdx.x < stride) {
s_average[threadIdx.x].x += s_average[threadIdx.x + stride].x;
s_average[threadIdx.x].y += s_average[threadIdx.x + stride].y;
s_average[threadIdx.x].z += s_average[threadIdx.x + stride].z;
}
__syncthreads();
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
atomicAdd(&(temp_average[ibx].x), s_average->x);
atomicAdd(&(temp_average[ibx].y), s_average->y);
atomicAdd(&(temp_average[ibx].z), s_average->z);
}
}
/**
* @Sum up and generate the output of the mosaic_filter_cpb_z()
*
* @param temp_average
* @param image_output
* @param average
* @param cols
* @param rows
* @param c
*/
__global__ void mosaic_out_cpb_z(float3 *temp_average, uchar3 *image_output, float3 *average, int cols, int rows, int c) {
// the same mapping in mosaic_filter_cpb_z()
int delta = DELTA(c);
int x = threadIdx.x + blockIdx.x * c + (blockIdx.z / delta) * MAX_TPB;
int y = threadIdx.y + blockIdx.y * c + (blockIdx.z % delta) * MAX_TPB;
int ibx = blockIdx.x + blockIdx.y * gridDim.x;
int m_area;
int mod_cols = cols % c;
int mod_rows = rows % c;
float3 m_average = temp_average[ibx];
mod_cols = (y < cols - mod_cols) ? c : mod_cols;
mod_rows = (x < rows - mod_rows) ? c : mod_rows;
m_area = mod_rows * mod_cols;
// modify it to ensure the partial calculation will not exceed the boundary
mod_cols = (y < cols - cols % MAX_TPB) ? MAX_TPB : cols % MAX_TPB;
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
image_output[offset].x = (unsigned char)(m_average.x / m_area);
image_output[offset].y = (unsigned char)(m_average.y / m_area);
image_output[offset].z = (unsigned char)(m_average.z / m_area);
}
}
if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.z == 0) {
atomicAdd(&(average->x), m_average.x);
atomicAdd(&(average->y), m_average.y);
atomicAdd(&(average->z), m_average.z);
}
}
// ---------------------------- 32 x 32 mosaic cells per block ---------------------------------
/**
* @Mosaic filter (32 x 32 mosaic cells per block)
*
* @param image
* @param image_output
* @param average
* @param cols
* @param rows
* @param c
*/
__global__ void mosaic_filter_ccmpb(uchar3 *image, uchar3 *image_output, float3 *average, int cols, int rows, int c) {
extern __shared__ float3 s_average[];
// mapping the index to position
int x = (threadIdx.x + blockIdx.x * MPR) * c;
int y = (threadIdx.y + blockIdx.y * MPR) * c;
int itx = threadIdx.x + threadIdx.y * MPR;
int m_area;
int mod_cols = cols % c;
int mod_rows = rows % c;
float3 m_average = make_float3(0, 0, 0);
// calculation the size to deal with partial mosaic
mod_cols = (y < cols - mod_cols) ? c : mod_cols;
mod_rows = (x < rows - mod_rows) ? c : mod_rows;
m_area = mod_rows * mod_cols;
// using for loop to sum up the RGB to the register
if (x < rows && y < cols) {
for (int i = 0; i < c; ++i) {
for (int j = 0; j < c; ++j) {
int x_offset = x + i;
int y_offset = y + j;
int offset = x_offset + y_offset * rows;
m_average.x += image[offset].x;
m_average.y += image[offset].y;
m_average.z += image[offset].z;
}
}
}
// do reduction with shared variable
s_average[itx] = m_average;
__syncthreads();
for (int stride = MPR_SQUARE / 2; stride != 0; stride >>= 1) {
if (itx < stride) {
s_average[itx].x += s_average[itx + stride].x;
s_average[itx].y += s_average[itx + stride].y;
s_average[itx].z += s_average[itx + stride].z;
}
__syncthreads();
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
atomicAdd(&(average->x), s_average[itx].x);
atomicAdd(&(average->y), s_average[itx].y);
atomicAdd(&(average->z), s_average[itx].z);
}
if (x < rows && y < cols) {
for (int i = 0; i < c; ++i) {
for (int j = 0; j < c; ++j) {
int x_offset = x + i;
int y_offset = y + j;
int offset = x_offset + y_offset * rows;
image_output[offset].x = (unsigned char)(m_average.x / m_area);
image_output[offset].y = (unsigned char)(m_average.y / m_area);
image_output[offset].z = (unsigned char)(m_average.z / m_area);
}
}
}
}
| 895951c773a2cfb049ebc9d78c53e7cbbb37b7c0.cu | /**
* @file cuda_filter.cu
* @yzheng
* @CUDA mosaic filter
* @version 0.1
* @date 2019-05-07
*
* @copyright Copyright (c) 2019, yzheng
*
*/
#include <stdlib.h>
#include "filter.h"
#include "cuda_filter.h"
#define WARP_SIZE 32
#define MPR 32 // mosaic cells per row in one block
#define MPR_SQUARE (MPR * MPR)
#define MAX_TPB 256 // max thread per block
#define MAX_TPB_SQUARE (MAX_TPB * MAX_TPB)
#define DELTA(X) ((X) / MAX_TPB) // c / MAX_TPB, to get number of z axis
#define DELTA_SQUARE(X) ((X) * (X) / MAX_TPB_SQUARE)
/**
* @Memory check
*
* @param msg
*/
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* @Convert pixel array to cuda vector
*
* @param dest_pixels
* @param src_pixels
* @param cols
* @param rows
*/
void pixel_to_image(uchar3 *dest_pixels, pixel *src_pixels, int cols, int rows) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
int index = j * rows + i;
int offset = i * cols + j;
dest_pixels[index].x = src_pixels[offset].r;
dest_pixels[index].y = src_pixels[offset].g;
dest_pixels[index].z = src_pixels[offset].b;
}
}
}
/**
* @Convert cuda vector to pixel array
*
* @param dest_pixels
* @param src_pixels
* @param cols
* @param rows
*/
void image_to_pixel(pixel *dest_pixels, uchar3 *src_pixels, int cols, int rows) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
int index = j * rows + i;
int offset = i * cols + j;
dest_pixels[offset].r = src_pixels[index].x;
dest_pixels[offset].g = src_pixels[index].y;
dest_pixels[offset].b = src_pixels[index].z;
}
}
}
/**
* @Mosaic filter
*
* @param pixels_o
* @param pixels_i
* @param cols
* @param rows
* @param c
*/
void mosaic_transform_cuda(pixel *pixels_o, pixel *pixels_i, int cols, int rows, int c) {
// checking c
if (!(c & (c - 1)) == 0) {
fprintf(stderr, "Error: C should be the power of two in CUDA implementation.\n");
exit(1);
}
int cMPR = c * MPR; // for computing the grid size in third implementation
int area = cols * rows;
int grid_cols = (cols % c) ? cols / c + 1 : cols / c; // deal with partial mosaic
int grid_rows = (rows % c) ? rows / c + 1 : rows / c;
int shared_size = c * sizeof(float3); // the size of shared variable
int image_size = area * sizeof(uchar3);
int average_size = sizeof(float3);
int temp_size = DELTA_SQUARE(c) * sizeof(float3); // the teamp array used when c > MAX_TPB
float ms;
uchar3 *image_i, *image_o, *d_image_i, *d_image_o;
float3 *average, *d_average, *d_average_temp;
cudaEvent_t start, stop;
// modify the size of the shared variable depends on the size
if (c <= 4 && rows / cMPR > 1 && cols / cMPR > 1) {
shared_size = MPR_SQUARE * sizeof(float3);
grid_cols = (cols % cMPR) ? cols / cMPR + 1 : cols / cMPR;
grid_rows = (rows % cMPR) ? rows / cMPR + 1 : rows / cMPR;
}
if (c > MAX_TPB) {
shared_size = MAX_TPB * sizeof(float3);
}
// create timers
cudaEventCreate(&start); cudaEventCreate(&stop);
// memory allocation on host
cudaMallocHost((void **)&image_i, image_size);
cudaMallocHost((void **)&image_o, image_size);
cudaMallocHost((void **)&average, average_size);
// memory allocation on device
cudaMalloc((void **)&d_image_i, image_size);
cudaMalloc((void **)&d_image_o, image_size);
cudaMalloc((void **)&d_average, average_size);
cudaMalloc((void **)&d_average_temp, temp_size);
checkCUDAError("CUDA malloc");
// convert pixel array to cuda vector
pixel_to_image(image_i, pixels_i, cols, rows);
// copy image from host to device
cudaMemcpy(d_image_i, image_i, image_size, cudaMemcpyHostToDevice);
checkCUDAError("CUDA memcpy");
cudaEventRecord(start);
// implementation type from top to bottom:
// 32 x 32 mosaic cells per block
// c thread per block with z axis
// c thread per block without z axis
if (c <= 4 && rows / cMPR > 1 && cols / cMPR > 1) {
dim3 blocksPerGrid(grid_rows, grid_cols);
dim3 threadsPerBlock(MPR, MPR);
mosaic_filter_ccmpb<<<blocksPerGrid, threadsPerBlock, shared_size>>>(d_image_i, d_image_o, d_average, cols, rows, c);
} else if (c > MAX_TPB) {
dim3 blocksPerGrid(grid_rows, grid_cols, DELTA_SQUARE(c));
dim3 threadsPerBlock(MAX_TPB, 1);
mosaic_filter_cpb_z<<<blocksPerGrid, threadsPerBlock, shared_size>>>(d_image_i, d_average_temp, cols, rows, c);
mosaic_out_cpb_z<<<blocksPerGrid, threadsPerBlock>>>(d_average_temp, d_image_o, d_average, cols, rows, c);
} else {
dim3 blocksPerGrid(grid_rows, grid_cols);
dim3 threadsPerBlock(c, 1);
mosaic_filter_cpb<<<blocksPerGrid, threadsPerBlock, shared_size>>>(d_image_i, d_image_o, d_average, cols, rows);
}
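    /*
     * Worked example of the branch selection above (sizes are illustrative):
     * for a 1024 x 1024 image with c = 4, cMPR = 128 and rows / cMPR = 8 > 1,
     * so the first branch runs mosaic_filter_ccmpb on an 8 x 8 grid of
     * 32 x 32 thread blocks, one 4 x 4 mosaic cell per thread, with
     * MPR_SQUARE * sizeof(float3) = 12288 bytes of shared memory per block.
     */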
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
checkCUDAError("CUDA kernel");
// copy output image and thread_average values from device to host
cudaMemcpy(image_o, d_image_o, image_size, cudaMemcpyDeviceToHost);
cudaMemcpy(average, d_average, average_size, cudaMemcpyDeviceToHost);
checkCUDAError("CUDA memcpy");
// convert cuda vector to pixel array
image_to_pixel(pixels_o, image_o, cols, rows);
pixel average_color = {
(unsigned char)(average->x / area),
(unsigned char)(average->y / area),
(unsigned char)(average->z / area)
};
// clean up
cudaFreeHost(image_o); cudaFreeHost(image_i); cudaFreeHost(average);
cudaFree(d_image_i); cudaFree(d_image_o); cudaFree(d_average); cudaFree(d_average_temp);
cudaEventDestroy(start); cudaEventDestroy(stop);
printf("CUDA mode execution time took %.3lf ms\n", ms);
printf("CUDA Average image colour red = %d, green = %d, blue = %d\n", average_color.r, average_color.g, average_color.b);
}
// -------------------------------- c threads per block --------------------------------------------
/**
* @Mosaic filter
*
* @param image
* @param image_output
* @param average
* @param cols
* @param rows
*/
__global__ void mosaic_filter_cpb(uchar3 *image, uchar3 *image_output, float3 *average, int cols, int rows) {
extern __shared__ float3 s_average[]; // sum of rgb values in one block/mosaic cell
int c = blockDim.x;
int x = threadIdx.x + blockIdx.x * c;
int y = threadIdx.y + blockIdx.y * c;
int m_area; // size of one mosaic cell
int mod_cols = cols % c;
int mod_rows = rows % c;
float3 thread_average = make_float3(0, 0, 0); // sum of one row in one block/mosaic cell
    // calculate the size of each mosaic cell depending on its position (to cope with partial mosaics)
mod_cols = (y < cols - mod_cols) ? c : mod_cols;
mod_rows = (x < rows - mod_rows) ? c : mod_rows;
m_area = mod_rows * mod_cols;
// traverse over one row of one mosaic cell
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
thread_average.x += image[offset].x;
thread_average.y += image[offset].y;
thread_average.z += image[offset].z;
}
}
// assign the sum of one row to shared variable
// perform reduction to get the sum for one block/mosaic cell
s_average[threadIdx.x] = thread_average;
__syncthreads();
    // if c <= 32 (a single warp), use a warp-level shuffle reduction, otherwise the shared-memory one
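    // (the shuffle-based path exchanges values directly between the registers of
    // threads in the same warp, which is why it needs no __syncthreads() between steps)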
if (c <= WARP_SIZE) {
for (int stride = c / 2; stride != 0; stride >>= 1) {
s_average[threadIdx.x].x += __shfl_down_sync(0xffffffff, s_average[threadIdx.x].x, stride);
s_average[threadIdx.x].y += __shfl_down_sync(0xffffffff, s_average[threadIdx.x].y, stride);
s_average[threadIdx.x].z += __shfl_down_sync(0xffffffff, s_average[threadIdx.x].z, stride);
}
} else {
for (int stride = c / 2; stride != 0; stride >>= 1) {
if (threadIdx.x < stride) {
s_average[threadIdx.x].x += s_average[threadIdx.x + stride].x;
s_average[threadIdx.x].y += s_average[threadIdx.x + stride].y;
s_average[threadIdx.x].z += s_average[threadIdx.x + stride].z;
}
__syncthreads();
}
}
// atomic add to the sum of the entire image
// averaging operation is in the host function
if (threadIdx.x == 0 && threadIdx.y == 0) {
atomicAdd(&(average->x), s_average->x);
atomicAdd(&(average->y), s_average->y);
atomicAdd(&(average->z), s_average->z);
}
    // compute this mosaic cell's average colour from the reduced sum
thread_average.x = s_average->x / m_area;
thread_average.y = s_average->y / m_area;
thread_average.z = s_average->z / m_area;
// assign back to the output image
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
image_output[offset].x = (unsigned char)thread_average.x;
image_output[offset].y = (unsigned char)thread_average.y;
image_output[offset].z = (unsigned char)thread_average.z;
}
}
}
/**
* @Mosaic filter with z axis
*
* @param image
* @param image_output
* @param temp_average
* @param cols
* @param rows
* @param c
*/
__global__ void mosaic_filter_cpb_z(uchar3 *image, float3 *temp_average, int cols, int rows, int c) {
// almost the same as the above function
extern __shared__ float3 s_average[];
int delta = DELTA(c); // used to mapping the index below
int x = threadIdx.x + blockIdx.x * c + (blockIdx.z / delta) * MAX_TPB;
int y = threadIdx.y + blockIdx.y * c + (blockIdx.z % delta) * MAX_TPB;
int ibx = blockIdx.x + blockIdx.y * gridDim.x;
int mod_cols = cols % MAX_TPB;
float3 thread_average = make_float3(0, 0, 0);
mod_cols = (y < cols - mod_cols) ? MAX_TPB : mod_cols;
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
thread_average.x += image[offset].x;
thread_average.y += image[offset].y;
thread_average.z += image[offset].z;
}
}
s_average[threadIdx.x] = thread_average;
__syncthreads();
// the initialization of stride will use max thread per block
for (int stride = MAX_TPB / 2; stride != 0; stride >>= 1) {
if (threadIdx.x < stride) {
s_average[threadIdx.x].x += s_average[threadIdx.x + stride].x;
s_average[threadIdx.x].y += s_average[threadIdx.x + stride].y;
s_average[threadIdx.x].z += s_average[threadIdx.x + stride].z;
}
__syncthreads();
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
atomicAdd(&(temp_average[ibx].x), s_average->x);
atomicAdd(&(temp_average[ibx].y), s_average->y);
atomicAdd(&(temp_average[ibx].z), s_average->z);
}
}
/**
* @Sum up and generate the output of the mosaic_filter_cpb_z()
*
* @param temp_average
* @param image_output
* @param average
* @param cols
* @param rows
* @param c
*/
__global__ void mosaic_out_cpb_z(float3 *temp_average, uchar3 *image_output, float3 *average, int cols, int rows, int c) {
// the same mapping in mosaic_filter_cpb_z()
int delta = DELTA(c);
int x = threadIdx.x + blockIdx.x * c + (blockIdx.z / delta) * MAX_TPB;
int y = threadIdx.y + blockIdx.y * c + (blockIdx.z % delta) * MAX_TPB;
int ibx = blockIdx.x + blockIdx.y * gridDim.x;
int m_area;
int mod_cols = cols % c;
int mod_rows = rows % c;
float3 m_average = temp_average[ibx];
mod_cols = (y < cols - mod_cols) ? c : mod_cols;
mod_rows = (x < rows - mod_rows) ? c : mod_rows;
m_area = mod_rows * mod_cols;
// modify it to ensure the partial calculation will not exceed the boundary
mod_cols = (y < cols - cols % MAX_TPB) ? MAX_TPB : cols % MAX_TPB;
if (x < rows && y < cols) {
for (int j = 0; j < mod_cols; ++j) {
int y_offset = y + j;
int offset = x + y_offset * rows;
image_output[offset].x = (unsigned char)(m_average.x / m_area);
image_output[offset].y = (unsigned char)(m_average.y / m_area);
image_output[offset].z = (unsigned char)(m_average.z / m_area);
}
}
if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.z == 0) {
atomicAdd(&(average->x), m_average.x);
atomicAdd(&(average->y), m_average.y);
atomicAdd(&(average->z), m_average.z);
}
}
// ---------------------------- 32 x 32 mosaic cells per block ---------------------------------
/**
* @Mosaic filter (32 x 32 mosaic cells per block)
*
* @param image
* @param image_output
* @param average
* @param cols
* @param rows
* @param c
*/
__global__ void mosaic_filter_ccmpb(uchar3 *image, uchar3 *image_output, float3 *average, int cols, int rows, int c) {
extern __shared__ float3 s_average[];
// mapping the index to position
int x = (threadIdx.x + blockIdx.x * MPR) * c;
int y = (threadIdx.y + blockIdx.y * MPR) * c;
int itx = threadIdx.x + threadIdx.y * MPR;
int m_area;
int mod_cols = cols % c;
int mod_rows = rows % c;
float3 m_average = make_float3(0, 0, 0);
    // calculate the size to deal with a partial mosaic cell
mod_cols = (y < cols - mod_cols) ? c : mod_cols;
mod_rows = (x < rows - mod_rows) ? c : mod_rows;
m_area = mod_rows * mod_cols;
    // sum up the RGB values of this thread's mosaic cell into registers
    if (x < rows && y < cols) {
        for (int i = 0; i < mod_rows; ++i) {
            for (int j = 0; j < mod_cols; ++j) {
int x_offset = x + i;
int y_offset = y + j;
int offset = x_offset + y_offset * rows;
m_average.x += image[offset].x;
m_average.y += image[offset].y;
m_average.z += image[offset].z;
}
}
}
// do reduction with shared variable
s_average[itx] = m_average;
__syncthreads();
for (int stride = MPR_SQUARE / 2; stride != 0; stride >>= 1) {
if (itx < stride) {
s_average[itx].x += s_average[itx + stride].x;
s_average[itx].y += s_average[itx + stride].y;
s_average[itx].z += s_average[itx + stride].z;
}
__syncthreads();
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
atomicAdd(&(average->x), s_average[itx].x);
atomicAdd(&(average->y), s_average[itx].y);
atomicAdd(&(average->z), s_average[itx].z);
}
if (x < rows && y < cols) {
        for (int i = 0; i < mod_rows; ++i) {
            for (int j = 0; j < mod_cols; ++j) {
int x_offset = x + i;
int y_offset = y + j;
int offset = x_offset + y_offset * rows;
image_output[offset].x = (unsigned char)(m_average.x / m_area);
image_output[offset].y = (unsigned char)(m_average.y / m_area);
image_output[offset].z = (unsigned char)(m_average.z / m_area);
}
}
}
}
|
0799ea37e09757d2c5dc236e1ad7feb6aaa2536d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kNN-brute-force-bitonic.cuh"
#include "kNN-brute-force-reduce.cuh"
#include "knn_gpgpu.h"
#include "reduction-mod.cuh"
#include <stdio.h>
#include <math.h>
#include "helper_cuda.h"
#define SHARED_SIZE_LIMIT 512U
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
__constant__ float d_query[3];
__global__ void cuComputeDistance( float *ref, int ref_nb , int dim, Distance *dist)
{
float dx, dy, dz;
int index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < ref_nb)
{
dx = ref[index * dim] - d_query[0];
dy = ref[index * dim + 1] - d_query[1];
dz = ref[index * dim + 2] - d_query[2];
dist[index].value = pow(dx, 2) + pow(dy, 2) + pow(dz, 2);
dist[index].index = index;
index += gridDim.x * blockDim.x;
}
}
__global__ void cuParallelSqrt(Distance *dist, int k)
{
int xIndex = blockIdx.x;
if (xIndex < k)
{
dist[xIndex].value = rsqrt(dist[xIndex].value);
}
}
void knn_brute_force(float *h_ref, int ref_nb, float *h_query, int dim, int k, float *dist, int *ind)
{
float *d_ref;
Distance *d_dist, *h_dist;
int i;
h_dist = (Distance *) malloc(k * sizeof(Distance));
checkCudaErrors(hipMalloc( (void **) &d_dist, ref_nb * sizeof(Distance)));
checkCudaErrors(hipMalloc( (void **) &d_ref, ref_nb * sizeof(float) * dim));
checkCudaErrors(hipMemcpy(d_ref, h_ref, ref_nb * dim * sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(d_query, h_query, dim * sizeof(float)));
int threadCount = min(ref_nb, SHARED_SIZE_LIMIT);
int blockCount = ref_nb / threadCount;
blockCount = min(blockCount, 65536);
hipLaunchKernelGGL(( cuComputeDistance) , dim3(blockCount), dim3(threadCount), 0, 0, d_ref, ref_nb, dim, d_dist);
for (i = 0; i < k; ++i)
{
dist_min_reduce(d_dist + i, ref_nb - i);
}
hipLaunchKernelGGL(( cuParallelSqrt) , dim3(k), dim3(1), 0, 0, d_dist, k);
checkCudaErrors(hipMemcpy(h_dist, d_dist, k * sizeof(Distance), hipMemcpyDeviceToHost));
for (i = 0; i < k; ++i)
{
dist[i] = h_dist[i].value;
ind[i] = h_dist[i].index;
}
checkCudaErrors(hipFree(d_ref));
checkCudaErrors(hipFree(d_dist));
}
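/*
 * Illustrative host-side call (sizes, values and the fill step are placeholders,
 * not part of the original code):
 *
 *   int ref_nb = 1 << 16, dim = 3, k = 8;
 *   float *ref = (float *) malloc(ref_nb * dim * sizeof(float));
 *   float query[3] = {1.0f, 2.0f, 3.0f};
 *   float dist[8];
 *   int ind[8];
 *   // ... fill ref with ref_nb interleaved xyz points ...
 *   knn_brute_force(ref, ref_nb, query, dim, k, dist, ind);
 *   // dist and ind now hold the k results selected by the kernels above
 */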
| 0799ea37e09757d2c5dc236e1ad7feb6aaa2536d.cu |
#include "kNN-brute-force-bitonic.cuh"
#include "kNN-brute-force-reduce.cuh"
#include "knn_gpgpu.h"
#include "reduction-mod.cuh"
#include <stdio.h>
#include <math.h>
#include "helper_cuda.h"
#define SHARED_SIZE_LIMIT 512U
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
__constant__ float d_query[3];
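// d_query holds the single 3-D query point; knn_brute_force below fills it once
// per call with cudaMemcpyToSymbol before launching cuComputeDistance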
__global__ void cuComputeDistance( float *ref, int ref_nb , int dim, Distance *dist)
{
float dx, dy, dz;
int index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < ref_nb)
{
dx = ref[index * dim] - d_query[0];
dy = ref[index * dim + 1] - d_query[1];
dz = ref[index * dim + 2] - d_query[2];
dist[index].value = pow(dx, 2) + pow(dy, 2) + pow(dz, 2);
dist[index].index = index;
index += gridDim.x * blockDim.x;
}
}
__global__ void cuParallelSqrt(Distance *dist, int k)
{
int xIndex = blockIdx.x;
if (xIndex < k)
{
dist[xIndex].value = rsqrt(dist[xIndex].value);
}
}
void knn_brute_force(float *h_ref, int ref_nb, float *h_query, int dim, int k, float *dist, int *ind)
{
float *d_ref;
Distance *d_dist, *h_dist;
int i;
h_dist = (Distance *) malloc(k * sizeof(Distance));
checkCudaErrors(cudaMalloc( (void **) &d_dist, ref_nb * sizeof(Distance)));
checkCudaErrors(cudaMalloc( (void **) &d_ref, ref_nb * sizeof(float) * dim));
checkCudaErrors(cudaMemcpy(d_ref, h_ref, ref_nb * dim * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(d_query, h_query, dim * sizeof(float)));
int threadCount = min(ref_nb, SHARED_SIZE_LIMIT);
int blockCount = ref_nb / threadCount;
blockCount = min(blockCount, 65536);
cuComputeDistance <<< blockCount, threadCount>>>(d_ref, ref_nb, dim, d_dist);
for (i = 0; i < k; ++i)
{
dist_min_reduce(d_dist + i, ref_nb - i);
}
cuParallelSqrt <<< k, 1>>>(d_dist, k);
checkCudaErrors(cudaMemcpy(h_dist, d_dist, k * sizeof(Distance), cudaMemcpyDeviceToHost));
for (i = 0; i < k; ++i)
{
dist[i] = h_dist[i].value;
ind[i] = h_dist[i].index;
}
checkCudaErrors(cudaFree(d_ref));
checkCudaErrors(cudaFree(d_dist));
}
|
024a8f06612cd58b3e6fcb9a1d2009348abf683c.hip | // !!! This is a file automatically generated by hipify!!!
#include "Convergence_GPU_julia.hpp"
#include "kernel_GPU_julia.cuh"
#include "hip/hip_runtime.h"
inline bool CUDA_MALLOC( void ** devPtr, size_t size ) {
hipError_t cudaStatus;
cudaStatus = hipMalloc( devPtr, size );
if ( cudaStatus != hipSuccess ) {
printf( "error: unable to allocate buffer\n");
return false;
}
return true;
}
inline bool CUDA_MEMCPY( void * dst, const void * src, size_t count, enum hipMemcpyKind kind ) {
hipError_t cudaStatus;
cudaStatus = hipMemcpy( dst, src, count, kind );
if ( cudaStatus != hipSuccess ) {
printf( "error: unable to copy buffer\n");
return false;
}
return true;
}
Convergence_GPU_julia::Convergence_GPU_julia() : Convergence("GPU_double_julia")
{
}
Convergence_GPU_julia::Convergence_GPU_julia(ColorMap* _colors, int _max_iters) : Convergence("GPU_double_julia")
{
colors = _colors;
max_iters = _max_iters;
hostTab = nullptr;
deviceTab = nullptr;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if ( cudaStatus != hipSuccess ) {
printf( "error: unable to setup cuda device\n");
exit(0);
}
}
Convergence_GPU_julia::~Convergence_GPU_julia( ){
    // free the device buffer before resetting the device, and match new[] with delete[]
    hipFree(deviceTab);
    delete[] hostTab;
    hipError_t cudaStatus = hipDeviceReset();
}
void Convergence_GPU_julia::updateImage(const long double _zoom, const long double _offsetX, const long double _offsetY, const int IMAGE_WIDTH, const int IMAGE_HEIGHT, sf::Image& image)
{
int nb_point = IMAGE_WIDTH*IMAGE_HEIGHT;
    dim3 grid(80,50,1); // number of blocks
    dim3 block(16,16,1); // threads per block
if(hostTab == nullptr)
hostTab = new uint32_t[nb_point];
if(deviceTab == nullptr)
CUDA_MALLOC((void**)&deviceTab, nb_point * sizeof(uint32_t));
double offsetX = _offsetX;
    double offsetY = _offsetY;
double zoom = _zoom;
hipLaunchKernelGGL(( kernel_updateImage_GPU_julia), dim3(grid), dim3(block), 0, 0, zoom, offsetX, offsetY, IMAGE_WIDTH, IMAGE_HEIGHT, deviceTab, max_iters);
CUDA_MEMCPY(hostTab, deviceTab, nb_point*sizeof(uint32_t), hipMemcpyDeviceToHost);
for(int y = 0; y < IMAGE_HEIGHT; y++)
{
for(int x = 0; x < IMAGE_WIDTH; x++)
{
image.setPixel(x, y, colors->getColor(hostTab[x+y*IMAGE_WIDTH]));
}
}
} | 024a8f06612cd58b3e6fcb9a1d2009348abf683c.cu | #include "Convergence_GPU_julia.hpp"
#include "kernel_GPU_julia.cuh"
#include "cuda_runtime.h"
inline bool CUDA_MALLOC( void ** devPtr, size_t size ) {
cudaError_t cudaStatus;
cudaStatus = cudaMalloc( devPtr, size );
if ( cudaStatus != cudaSuccess ) {
printf( "error: unable to allocate buffer\n");
return false;
}
return true;
}
inline bool CUDA_MEMCPY( void * dst, const void * src, size_t count, enum cudaMemcpyKind kind ) {
cudaError_t cudaStatus;
cudaStatus = cudaMemcpy( dst, src, count, kind );
if ( cudaStatus != cudaSuccess ) {
printf( "error: unable to copy buffer\n");
return false;
}
return true;
}
Convergence_GPU_julia::Convergence_GPU_julia() : Convergence("GPU_double_julia")
{
}
Convergence_GPU_julia::Convergence_GPU_julia(ColorMap* _colors, int _max_iters) : Convergence("GPU_double_julia")
{
colors = _colors;
max_iters = _max_iters;
hostTab = nullptr;
deviceTab = nullptr;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if ( cudaStatus != cudaSuccess ) {
printf( "error: unable to setup cuda device\n");
exit(0);
}
}
Convergence_GPU_julia::~Convergence_GPU_julia( ){
    // free the device buffer before resetting the device, and match new[] with delete[]
    cudaFree(deviceTab);
    delete[] hostTab;
    cudaError_t cudaStatus = cudaDeviceReset();
}
void Convergence_GPU_julia::updateImage(const long double _zoom, const long double _offsetX, const long double _offsetY, const int IMAGE_WIDTH, const int IMAGE_HEIGHT, sf::Image& image)
{
int nb_point = IMAGE_WIDTH*IMAGE_HEIGHT;
    dim3 grid(80,50,1); // number of blocks
    dim3 block(16,16,1); // threads per block
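    // note: this fixed launch covers 80*16 = 1280 by 50*16 = 800 threads, so it
    // assumes IMAGE_WIDTH <= 1280 and IMAGE_HEIGHT <= 800 if the kernel in
    // kernel_GPU_julia.cuh maps one thread per pixel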
if(hostTab == nullptr)
hostTab = new uint32_t[nb_point];
if(deviceTab == nullptr)
CUDA_MALLOC((void**)&deviceTab, nb_point * sizeof(uint32_t));
double offsetX = _offsetX;
    double offsetY = _offsetY;
double zoom = _zoom;
kernel_updateImage_GPU_julia<<<grid, block>>>(zoom, offsetX, offsetY, IMAGE_WIDTH, IMAGE_HEIGHT, deviceTab, max_iters);
CUDA_MEMCPY(hostTab, deviceTab, nb_point*sizeof(uint32_t), cudaMemcpyDeviceToHost);
for(int y = 0; y < IMAGE_HEIGHT; y++)
{
for(int x = 0; x < IMAGE_WIDTH; x++)
{
image.setPixel(x, y, colors->getColor(hostTab[x+y*IMAGE_WIDTH]));
}
}
} |
c8290aa91688b2fa09c2acee7b900b4afc496440.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    // the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
// Default initial condition
/*
*((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
//real sv_sst[]={-86.6337556349546,0.00127215057254844,0.781315329700828,0.781192702879389,0.000173232959601247,0.485771934772721,0.00292661184320977,0.999998369627955,1.91248713554218e-08,1.87462257542883e-05,0.999765973534775,1.00688195901693,0.999991331074147,5.01588072510622e-05,0.719318246052902,9.82154696449291,139.637347751159};
real sv_sst[]={-86.6404915792850,0.00127032163211322,0.781479753157976,0.781360816517016,0.000172969600594225,0.485842045427499,0.00292520813217015,0.999998371823369,1.91034113695031e-08,1.87293970187045e-05,0.999771221267447,1.00691525856031,0.999992103392003,4.93846276389813e-05,0.695256716079829,9.83880114557068,139.633017313049};
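        // sv_sst lists the 17 state variables in the same order as the commented
        // defaults above: V, m, h, j, Xr1, Xr2, Xs, s, r, d, f, fCa, g, Cai, CaSR, Nai, Ki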
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
// real Vmaxup=0.000425f;
real Vmaxup=0.000714016847624717;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
// real Gkr=0.096;
real Gkr=0.129819327185159;
//Parameters for Iks
real pKNa=0.03;
#ifdef EPI
// real Gks=0.245;
real Gks=0.227808856917217;
#endif
#ifdef ENDO
real Gks=0.245;
#endif
#ifdef MCELL
real Gks=0.062;
#endif
//Parameters for Ik1
//real GK1=5.405;
real GK1=3.92366049957936;
//Parameters for Ito
#ifdef EPI
// real Gto=0.294;
real Gto=0.290683783819880;
#endif
#ifdef ENDO
real Gto=0.073;
#endif
#ifdef MCELL
real Gto=0.294;
#endif
//Parameters for INa
// real GNa=14.838;
real GNa=13.4587995801200;
//Parameters for IbNa
// real GbNa=0.00029;
real GbNa=0.000132990931598298;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
// real knak=1.362;
real knak=2.84430638940750;
//Parameters for ICaL
// real GCaL=0.000175;
real GCaL=0.000158212114858015;
//Parameters for IbCa
// real GbCa=0.000592;
real GbCa=0.000706297098320405;
//Parameters for INaCa
// real knaca=1000;
real knaca=1096.43133943582;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
// real GpCa=0.825;
real GpCa=0.390810222439592;
real KpCa=0.0005;
//Parameters for IpK;
// real GpK=0.0146;
real GpK=0.0199551557341385;
// Setting Elnaz's parameters
real parameters []={14.4701107547473,0.000162061905578968,0.000188488521383406,0.000572929459830166,0.335244898151308,0.119541023695594,0.248924317567785,5.19603253018384,0.0221271053316735,2.03169412747953,1099.72574265209,0.000483122952800270,0.478907546954075,0.0199668557152203,0.00562797831559110,3.64128969863145e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
//A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
// Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrtf(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| c8290aa91688b2fa09c2acee7b900b4afc496440.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
    // the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
// Default initial condition
/*
*((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
//real sv_sst[]={-86.6337556349546,0.00127215057254844,0.781315329700828,0.781192702879389,0.000173232959601247,0.485771934772721,0.00292661184320977,0.999998369627955,1.91248713554218e-08,1.87462257542883e-05,0.999765973534775,1.00688195901693,0.999991331074147,5.01588072510622e-05,0.719318246052902,9.82154696449291,139.637347751159};
real sv_sst[]={-86.6404915792850,0.00127032163211322,0.781479753157976,0.781360816517016,0.000172969600594225,0.485842045427499,0.00292520813217015,0.999998371823369,1.91034113695031e-08,1.87293970187045e-05,0.999771221267447,1.00691525856031,0.999992103392003,4.93846276389813e-05,0.695256716079829,9.83880114557068,139.633017313049};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
// real Vmaxup=0.000425f;
real Vmaxup=0.000714016847624717;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
// real Gkr=0.096;
real Gkr=0.129819327185159;
//Parameters for Iks
real pKNa=0.03;
#ifdef EPI
// real Gks=0.245;
real Gks=0.227808856917217;
#endif
#ifdef ENDO
real Gks=0.245;
#endif
#ifdef MCELL
real Gks=0.062;
#endif
//Parameters for Ik1
//real GK1=5.405;
real GK1=3.92366049957936;
//Parameters for Ito
#ifdef EPI
// real Gto=0.294;
real Gto=0.290683783819880;
#endif
#ifdef ENDO
real Gto=0.073;
#endif
#ifdef MCELL
real Gto=0.294;
#endif
//Parameters for INa
// real GNa=14.838;
real GNa=13.4587995801200;
//Parameters for IbNa
// real GbNa=0.00029;
real GbNa=0.000132990931598298;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
// real knak=1.362;
real knak=2.84430638940750;
//Parameters for ICaL
// real GCaL=0.000175;
real GCaL=0.000158212114858015;
//Parameters for IbCa
// real GbCa=0.000592;
real GbCa=0.000706297098320405;
//Parameters for INaCa
// real knaca=1000;
real knaca=1096.43133943582;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
// real GpCa=0.825;
real GpCa=0.390810222439592;
real KpCa=0.0005;
//Parameters for IpK;
// real GpK=0.0146;
real GpK=0.0199551557341385;
// Setting Elnaz's parameters
real parameters []={14.4701107547473,0.000162061905578968,0.000188488521383406,0.000572929459830166,0.335244898151308,0.119541023695594,0.248924317567785,5.19603253018384,0.0221271053316735,2.03169412747953,1099.72574265209,0.000483122952800270,0.478907546954075,0.0199668557152203,0.00562797831559110,3.64128969863145e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
//A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
// Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrtf(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
48e8717a1739a33c137b530af5f3f2284e1649db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
namespace SCRIMP {
#define BLOCKSZ_SP 512
#define BLOCKSZ_DP 256
#define BLOCKSPERSM_SELF 2
#define BLOCKSPERSM_AB 2
#define TILE_HEIGHT 200
#define TILE_HEIGHT_DP 200
// Atomically updates the MP/idxs using a single 64-bit integer. We lose a small amount of precision in the output;
// without this we would be unable to atomically update both the matrix profile and the indexes without using a critical section and dedicated locks.
__device__ inline void MPatomicMax(volatile unsigned long long int* address, float val, unsigned int idx)
{
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val){
loctest.ulong = atomicCAS((unsigned long long int*) address, loctest.ulong, loc.ulong);
}
}
// As above, but checks a previously read value before attempting another read
// This allows us to exploit vectorized loads of the matrix profile
__device__ inline void MPatomicMax_check(volatile unsigned long long int* __restrict__ address, float val, unsigned int idx, float curr_val)
{
if(val > curr_val) {
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val){
loctest.ulong = atomicCAS((unsigned long long int*) address, loctest.ulong, loc.ulong);
}
}
}
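// For reference, the helpers above assume an mp_entry union along these lines
// (the actual definition lives in kernels.h; this sketch only mirrors how the
// fields are used here):
//
//   typedef union {
//       unsigned long long int ulong; // packed 64-bit value handed to atomicCAS
//       float floats[2];              // floats[0]: matrix profile value
//       unsigned int ints[2];         // ints[1]:   matrix profile index
//   } mp_entry;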
__device__ inline void MPMax(const float d1, const float d2, const unsigned int i1,
const unsigned int i2, float &outd, unsigned int &outi)
{
if(d1 >= d2) {
outd = d1;
outi = i1;
} else {
outd = d2;
outi = i2;
}
}
// Computes max(a,b) with index and stores the result in a
__device__ inline void MPMax2(float &d1, const float &d2, unsigned int &i1,
const unsigned int &i2)
{
if(d2 > d1) {
d1 = d2;
i1 = i2;
}
}
// Computes the max of 4 values in a float 4
__device__ inline float max4(const float4 &d, const unsigned int init, unsigned int &idx) {
float ret = d.x;
idx = init;
if(d.y > ret) {
ret = d.y;
idx = init + 1;
}
if(d.z > ret) {
ret = d.z;
idx = init + 2;
}
if(d.w > ret) {
ret = d.w;
idx = init + 3;
}
return ret;
}
template<class T, int tile_height, int tile_width, bool full_join, bool only_col, int BLOCKSZ>
__device__ inline void initialize_tile_memory(const unsigned long long int* __restrict__ profile_A,
const unsigned long long int* __restrict__ profile_B,
const double* __restrict__ df_A, const double* __restrict__ df_B,
const double* __restrict__ dg_A, const double* __restrict__ dg_B,
const double* __restrict__ norms_A, const double* __restrict__ norms_B,
mp_entry* __restrict__ local_mp_col, mp_entry* __restrict__ local_mp_row,
T* __restrict__ df_col, T* __restrict__ df_row, T* __restrict__ dg_col,
T* __restrict__ dg_row, T* __restrict__ norm_col, T* __restrict__ norm_row,
const unsigned int n_x, const unsigned int n_y, const unsigned int col_start,
const unsigned int row_start)
{
int global_position = col_start + threadIdx.x;
int local_position = threadIdx.x;
while(local_position < tile_width && global_position < n_x) {
dg_col[local_position] = dg_A[global_position];
df_col[local_position] = df_A[global_position];
norm_col[local_position] = norms_A[global_position];
if(full_join || only_col) {
local_mp_col[local_position].ulong = profile_A[global_position];
}
local_position += BLOCKSZ;
global_position += BLOCKSZ;
}
global_position = row_start + threadIdx.x;
local_position = threadIdx.x;
while(local_position < tile_height && global_position < n_y) {
dg_row[local_position] = dg_B[global_position];
df_row[local_position] = df_B[global_position];
norm_row[local_position] = norms_B[global_position];
if(full_join || !only_col) {
local_mp_row[local_position].ulong = profile_B[global_position];
}
local_position += BLOCKSZ;
global_position += BLOCKSZ;
}
}
// This does one row of work for 2 diagonals in a single thread
template<class T, bool full_join, bool only_col>
__device__ inline void do_unrolled_row2(T &cov1, T &cov2, float &distcol1, float &distcol2, unsigned int &idxcol1,
unsigned int &idxcol2, const T &inormcx, const T &inormcy, const T &inormr,
const T &df_colx, const T &df_coly, const T &dg_colx, const T &dg_coly,
const T &df_row, const T &dg_row, const int &row, const int &col,
const int &global_row, const int &global_col, mp_entry* __restrict__ mp_row,
const float &curr_val) {
float2 dist;
// Compute the row's distances
dist.x = cov1 * inormcx * inormr;
dist.y = cov2 * inormcy * inormr;
// Compute the next covariance values
cov1 = cov1 + df_colx * dg_row + dg_colx * df_row;
cov2 = cov2 + df_coly * dg_row + dg_coly * df_row;
// Update the column best-so-far values
if(full_join || only_col) {
MPMax2(distcol1, dist.x, idxcol1, global_row);
MPMax2(distcol2, dist.y, idxcol2, global_row);
}
if(full_join || !only_col) {
unsigned int idx = global_col;
// We take the maximum of the columns we computed for the row
// And use that value to check the matrix profile
MPMax2(dist.x, dist.y, idx, global_col + 1);
MPatomicMax_check((unsigned long long*) (mp_row + row), dist.x, idx, curr_val);
}
}
// This does one row of work for 4 diagonals in a single thread
template<class T, bool full_join, bool only_col>
__device__ inline void do_unrolled_row4(T &cov1, T &cov2, T &cov3, T &cov4,
float &distcol1, float &distcol2, float &distcol3,
float &distcol4, unsigned int &idxcol1,
unsigned int &idxcol2, unsigned int &idxcol3, unsigned int &idxcol4,
const T &inormcx, const T &inormcy, const T &inormcz,
const T &inormcw, const T &inormr,
const T &df_colx, const T &df_coly, const T &df_colz,
const T &df_colw, const T &dg_colx, const T &dg_coly,
const T &dg_colz, const T &dg_colw, const T &df_row,
const T &dg_row, const int &row, const int &col,
const int &global_row, const int &global_col,
mp_entry* __restrict__ mp_row, const float &curr_val) {
float4 dist;
// Compute the row's distances
dist.x = cov1 * inormcx * inormr;
dist.y = cov2 * inormcy * inormr;
dist.z = cov3 * inormcz * inormr;
dist.w = cov4 * inormcw * inormr;
// Compute the next covariance values
cov1 = cov1 + df_colx * dg_row + dg_colx * df_row;
cov2 = cov2 + df_coly * dg_row + dg_coly * df_row;
cov3 = cov3 + df_colz * dg_row + dg_colz * df_row;
cov4 = cov4 + df_colw * dg_row + dg_colw * df_row;
// Update the column best-so-far values
if(full_join || only_col) {
MPMax2(distcol1, dist.x, idxcol1, global_row);
MPMax2(distcol2, dist.y, idxcol2, global_row);
MPMax2(distcol3, dist.z, idxcol3, global_row);
MPMax2(distcol4, dist.w, idxcol4, global_row);
}
if(full_join || !only_col) {
unsigned int idx;
// We take the maximum of the columns we computed for the row
// And use that value to check the matrix profile
float d = max4(dist, global_col, idx);
MPatomicMax_check((unsigned long long*) (mp_row + row), d, idx, curr_val);
}
}
// Processes 2 iterations of the inner loop. Each thread computes 2 distances per iteration (x,y), (x+1,y)
// This function assumes that the edge cases that occur on the edge of the distance matrix are not present. This is the faster path,
// with less conditional branching.
template<class T, class VT2, bool full_join, bool only_col>
__device__ inline void do_iteration_unroll_2(int i, int j, int x, int y, T &cov1, T &cov2,
T* __restrict__ df_col, T* __restrict__ df_row,
T* __restrict__ dg_col, T* __restrict__ dg_row,
T* __restrict__ inorm_col, T* __restrict__ inorm_row,
mp_entry* __restrict__ local_mp_col, mp_entry* __restrict__ local_mp_row)
{
float2 distc = make_float2(CC_MIN, CC_MIN);
float2 distc2 = make_float2(CC_MIN, CC_MIN);
uint2 idxc,idxc2;
// Load row values 2 at a time, load column values 4 at a time
int r = i >> 1;
int c = j >> 1;
// Preload the shared memory values we will use into registers
// We load 2 values per vectorized load into a VT2 vector type
VT2 dfc = reinterpret_cast<VT2*>(df_col)[c];
VT2 dgc = reinterpret_cast<VT2*>(dg_col)[c];
VT2 inormc = reinterpret_cast<VT2*>(inorm_col)[c];
VT2 dfc2 = reinterpret_cast<VT2*>(df_col)[c+1];
VT2 dgc2 = reinterpret_cast<VT2*>(dg_col)[c+1];
VT2 inormc2 = reinterpret_cast<VT2*>(inorm_col)[c+1];
ulonglong2 mp_col_check1, mp_col_check2;
ulonglong2 mp_row_check;
// Copy the pieces of the cache we will use into registers with vectorized loads
if(full_join || only_col) {
mp_col_check1 = reinterpret_cast<ulonglong2*>(local_mp_col)[c];
}
if(full_join || !only_col) {
mp_row_check = reinterpret_cast<ulonglong2*>(local_mp_row)[r];
}
VT2 dgr = reinterpret_cast<VT2*>(dg_row)[r];
VT2 dfr = reinterpret_cast<VT2*>(df_row)[r];
VT2 inormr = reinterpret_cast<VT2*>(inorm_row)[r];
// Do rows one at a time:
// We are computing a tile that looks like this:
// C:1 2 3
//R1 X X
//R2 X X
// For 2 diagonals unrolled 2 times we compute a total of 4 distances.
// These distances cover 2 possible rows and 3 possible columns, so we need to check the matrix profile
// 5 times total, once for each row and once for each column
mp_entry e;
e.ulong = mp_row_check.x;
do_unrolled_row2<T, full_join,only_col>(cov1, cov2, distc.x, distc.y, idxc.x, idxc.y,
inormc.x, inormc.y, inormr.x, dfc.x, dfc.y,
dgc.x, dgc.y, dfr.x, dgr.x, i, j, y, x, local_mp_row, e.floats[0]);
// Each row's computation allows us to complete a column, the first row completes column 1
if(full_join || only_col) {
e.ulong = mp_col_check1.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j), distc.x, idxc.x, e.floats[0]);
}
e.ulong = mp_row_check.y;
do_unrolled_row2<T,full_join, only_col>(cov1, cov2, distc.y, distc2.x, idxc.y,
idxc2.x, inormc.y, inormc2.x, inormr.y,
dfc.y, dfc2.x, dgc.y, dgc2.x, dfr.y, dgr.y,
i + 1, j + 1, y + 1, x + 1, local_mp_row, e.floats[0]);
// The second row completes column 2 and 3
if(full_join || only_col) {
e.ulong = mp_col_check1.y;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 1), distc.y, idxc.y, e.floats[0]);
mp_col_check2 = reinterpret_cast<ulonglong2*>(local_mp_col)[c+1];
e.ulong = mp_col_check2.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 2), distc2.x, idxc2.x, e.floats[0]);
}
}
// Processes 4 iterations of the inner loop. Each thread computes 4 distances per iteration (x,y), (x+1,y), (x+2,y), and (x+3,y)
// This function assumes that the edge cases that occur on the edge of the distance matrix are not present. This is the faster path,
// with less conditional branching.
template<class T, class VT4, class VT2, bool full_join, bool only_col>
__device__ inline void do_iteration_unroll_4(int i, int j, int x, int y, T &cov1, T &cov2, T &cov3,
T &cov4, T* __restrict__ df_col, T* __restrict__ df_row,
T* __restrict__ dg_col, T* __restrict__ dg_row,
T* __restrict__ inorm_col, T* __restrict__ inorm_row,
mp_entry* __restrict__ local_mp_col, mp_entry* __restrict__ local_mp_row)
{
float4 distc = make_float4(CC_MIN, CC_MIN, CC_MIN, CC_MIN);
float4 distc2 = make_float4(CC_MIN, CC_MIN, CC_MIN, CC_MIN);
uint4 idxc,idxc2;
// Load row values 2 at a time, load column values 4 at a time
int r = i >> 1;
int c = j >> 2;
int c2 = j >> 1;
// Preload the shared memory values we will use into registers
// We load 4 values per thread into a float4 vector type
VT4 dfc = reinterpret_cast<VT4*>(df_col)[c];
VT4 dgc = reinterpret_cast<VT4*>(dg_col)[c];
VT4 inormc = (reinterpret_cast<VT4*>(inorm_col)[c]);
VT4 dfc2 = reinterpret_cast<VT4*>(df_col)[c+1];
VT4 dgc2 = reinterpret_cast<VT4*>(dg_col)[c+1];
VT4 inormc2 = reinterpret_cast<VT4*>(inorm_col)[c+1];
ulonglong2 mp_col_check1, mp_col_check2;
ulonglong2 mp_row_check;
// Copy the pieces of the cache we will use into registers with vectorized loads
if(full_join || only_col) {
mp_col_check1 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2];
}
if(full_join || !only_col) {
mp_row_check = reinterpret_cast<ulonglong2*>(local_mp_row)[r];
}
// Due to a lack of registers on Volta, we only load these row values 2 at a time
VT2 dgr = reinterpret_cast<VT2*>(dg_row)[r];
VT2 dfr = reinterpret_cast<VT2*>(df_row)[r];
VT2 inormr = reinterpret_cast<VT2*>(inorm_row)[r];
// Do rows one at a time:
// We are computing a tile that looks like this:
// C:1 2 3 4 5 6 7
//R1 X X X X
//R2 X X X X
//R3 X X X X
//R4 X X X X
// For 4 diagonals unrolled 4 times we compute a total of 16 distances.
// These distances cover 4 possible rows and 7 possible columns, so we need to check the matrix profile
// 11 times total, once for each row and once for each column
mp_entry e;
e.ulong = mp_row_check.x;
do_unrolled_row4<T, full_join,only_col>(cov1, cov2, cov3, cov4, distc.x, distc.y, distc.z, distc.w,
idxc.x, idxc.y, idxc.z, idxc.w, inormc.x, inormc.y, inormc.z,
inormc.w, inormr.x, dfc.x, dfc.y, dfc.z, dfc.w, dgc.x, dgc.y,
dgc.z, dgc.w, dfr.x, dgr.x, i, j, y, x, local_mp_row, e.floats[0]);
// Each row's computation allows us to complete a column, the first row completes column 1
if(full_join || only_col) {
e.ulong = mp_col_check1.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j), distc.x, idxc.x, e.floats[0]);
}
e.ulong = mp_row_check.y;
do_unrolled_row4<T,full_join, only_col>(cov1, cov2, cov3, cov4, distc.y, distc.z, distc.w, distc2.x,
idxc.y, idxc.z, idxc.w, idxc2.x, inormc.y, inormc.z, inormc.w,
inormc2.x, inormr.y, dfc.y, dfc.z, dfc.w, dfc2.x, dgc.y, dgc.z,
dgc.w, dgc2.x, dfr.y, dgr.y, i + 1, j + 1, y + 1, x + 1,
local_mp_row, e.floats[0]);
// The second row completes column 2
if(full_join || only_col) {
e.ulong = mp_col_check1.y;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 1), distc.y, idxc.y, e.floats[0]);
}
// Load the values for the next 2 rows
dgr = reinterpret_cast<VT2*>(dg_row)[r + 1];
dfr = reinterpret_cast<VT2*>(df_row)[r + 1];
inormr = reinterpret_cast<VT2*>(inorm_row)[r + 1];
if(full_join || !only_col) {
mp_row_check = reinterpret_cast<ulonglong2*>(local_mp_row)[r + 1];
}
e.ulong = mp_row_check.x;
do_unrolled_row4<T,full_join,only_col>(cov1, cov2, cov3, cov4, distc.z, distc.w, distc2.x, distc2.y,
idxc.z, idxc.w, idxc2.x, idxc2.y, inormc.z, inormc.w, inormc2.x,
inormc2.y, inormr.x, dfc.z, dfc.w, dfc2.x, dfc2.y, dgc.z, dgc.w,
dgc2.x, dgc2.y, dfr.x, dgr.x, i + 2, j + 2, y + 2, x + 2,
local_mp_row, e.floats[0]);
// The third row completes column 3
if(full_join || only_col) {
mp_col_check2 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2 + 1];
e.ulong = mp_col_check2.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 2), distc.z, idxc.z, e.floats[0]);
}
e.ulong = mp_row_check.y;
do_unrolled_row4<T,full_join,only_col>(cov1, cov2, cov3, cov4, distc.w, distc2.x, distc2.y, distc2.z,
idxc.w, idxc2.x, idxc2.y, idxc2.z, inormc.w, inormc2.x, inormc2.y,
inormc2.z, inormr.y, dfc.w, dfc2.x, dfc2.y, dfc2.z, dgc.w, dgc2.x,
dgc2.y, dgc2.z, dfr.y, dgr.y, i + 3, j + 3, y + 3, x + 3,
local_mp_row, e.floats[0]);
// After the 4th row, we have completed columns 4, 5, 6, and 7
if(full_join || only_col) {
e.ulong = mp_col_check2.y;
mp_col_check1 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2+2];
mp_col_check2 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2+3];
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 3), distc.w, idxc.w, e.floats[0]);
e.ulong = mp_col_check1.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 4), distc2.x, idxc2.x, e.floats[0]);
e.ulong = mp_col_check1.y;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 5), distc2.y, idxc2.y, e.floats[0]);
e.ulong = mp_col_check2.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 6), distc2.z, idxc2.z, e.floats[0]);
}
}
// Does a single iteration of the inner loop on 2 diagonals per thread, not unrolled
// Checks for the boundary case where only 1 diagonal can be updated
template<class T, bool full_join, bool only_col>
__device__ inline void do_iteration_2diag(int i, int j, int x, int y,
size_t global_start_x, size_t global_start_y,
int n, T &cov1, T &cov2, T* __restrict__ df_col,
T* __restrict__ df_row, T* __restrict__ dg_col,
T* __restrict__ dg_row, T* __restrict__ inorm_col,
T* __restrict__ inorm_row, mp_entry* __restrict__ local_mp_col,
mp_entry* __restrict__ local_mp_row, size_t diag, size_t num_diags)
{
float dist_1;
unsigned int idx_1;
float2 dist;
// Compute the next set of distances (row y)
dist.x = cov1 * inorm_col[j] * inorm_row[i];
dist.y = cov2 * inorm_col[j + 1] * inorm_row[i];
// Update cov and compute the next distance values (row y)
cov1 = cov1 + df_col[j] * dg_row[i] + dg_col[j] * df_row[i];
cov2 = cov2 + df_col[j+1] * dg_row[i] + dg_col[j+1] * df_row[i];
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j), dist.x, y + global_start_y);
}
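// dist_1/idx_1 accumulate the best distance across the diagonals handled by
// this thread so the row profile is updated with a single atomic below.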
dist_1 = dist.x;
idx_1 = x + global_start_x;
if(x + 1 < n && diag + 1 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.y, idx_1, global_start_x + x + 1, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 1), dist.y, y + global_start_y);
}
}
if(full_join || !only_col) {
MPatomicMax((unsigned long long*) (local_mp_row + i), dist_1, idx_1);
}
}
// Does a single iteration of the inner loop on 4 diagonals per thread, not unrolled
// Checks for the boundary case where only 1, 2, or 3 diagonals can be updated
template<class T, bool full_join, bool only_col>
__device__ inline void do_iteration_4diag(int i, int j, int x, int y,
size_t global_start_x, size_t global_start_y,
int n, T &cov1, T &cov2,
T &cov3, T &cov4, T* __restrict__ df_col,
T* __restrict__ df_row, T* __restrict__ dg_col,
T* __restrict__ dg_row, T* __restrict__ inorm_col,
T* __restrict__ inorm_row, mp_entry* __restrict__ local_mp_col,
mp_entry* __restrict__ local_mp_row, size_t diag, size_t num_diags)
{
float dist_1;
unsigned int idx_1;
float4 dist;
// Compute the next set of distances (row y)
dist.x = static_cast<float>(cov1) * inorm_col[j] * inorm_row[i];
dist.y = static_cast<float>(cov2) * inorm_col[j + 1] * inorm_row[i];
dist.z = static_cast<float>(cov3) * inorm_col[j + 2] * inorm_row[i];
dist.w = static_cast<float>(cov4) * inorm_col[j + 3] * inorm_row[i];
// Update cov and compute the next distance values (row y)
cov1 = cov1 + df_col[j] * dg_row[i] + dg_col[j] * df_row[i];
cov2 = cov2 + df_col[j+1] * dg_row[i] + dg_col[j+1] * df_row[i];
cov3 = cov3 + df_col[j+2] * dg_row[i] + dg_col[j + 2] * df_row[i];
cov4 = cov4 + df_col[j+3] * dg_row[i] + dg_col[j + 3] * df_row[i];
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j), dist.x, y + global_start_y);
}
dist_1 = dist.x;
idx_1 = x + global_start_x;
if(x + 1 < n && diag + 1 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.y, idx_1, global_start_x + x + 1, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 1), dist.y, y + global_start_y);
}
}
if(x + 2 < n && diag + 2 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.z, idx_1, global_start_x + x + 2, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 2), dist.z, y + global_start_y);
}
}
if(x + 3 < n && diag + 3 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.w, idx_1, global_start_x + x + 3, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 3), dist.w, y + global_start_y);
}
}
if(full_join || !only_col) {
MPatomicMax((unsigned long long*) (local_mp_row + i), dist_1, idx_1);
}
}
// Computes the matrix profile given the sliding dot products for the first query and the precomputed data statistics
template<class T, class T2, class T4, bool fp64, bool full_join, bool only_col, int blocks_per_sm, int diags_per_thread, int tile_height, int BLOCKSZ>
__global__ void __launch_bounds__(BLOCKSZ, blocks_per_sm)
do_tile(const double* __restrict__ Cov, const double* __restrict__ dfa,
const double* __restrict__ dfb, const double* __restrict__ dga,
const double* __restrict__ dgb, const double* __restrict__ normsa,
const double* __restrict__ normsb,
unsigned long long* __restrict__ profile_A,
unsigned long long* __restrict__ profile_B,
const unsigned int m, const unsigned int n_x, const unsigned int n_y,
const unsigned int global_start_x, const unsigned int global_start_y,
const int exclusion_lower, const int exclusion_upper)
{
// tile_height must be a multiple of 4
// Tuned for V100
const int tile_width = tile_height + BLOCKSZ * diags_per_thread;
extern __shared__ char smem[];
mp_entry *local_mp_col, *local_mp_row;
T *df_col, *dg_col, *inorm_col, *df_row, *dg_row, *inorm_row;
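// Carve the dynamic shared memory into the per-tile caches:
// [df_col | dg_col | inorm_col] (tile_width elements each),
// [df_row | dg_row | inorm_row] (tile_height elements each),
// followed by the cached matrix profile entries for the column and/or row,
// depending on the join type selected by the template parameters.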
df_col = (T*) smem;
dg_col = df_col + tile_width;
inorm_col = dg_col + tile_width;
df_row = inorm_col + tile_width;
dg_row = df_row + tile_height;
inorm_row = dg_row + tile_height;
mp_entry *pos = (mp_entry*) (inorm_row + tile_height);
if(!full_join && only_col) {
local_mp_col = pos;
} else if(!full_join) {
local_mp_row = pos;
} else {
local_mp_col = pos;
local_mp_row = pos + tile_width;
}
const unsigned int start_diag = (threadIdx.x * diags_per_thread) + blockIdx.x * (blockDim.x * diags_per_thread);
// This is the index of the meta-diagonal that this thread block will work on
const unsigned int meta_diagonal_idx = blockIdx.x;
// The first threads would actually compute the trivial match between a subsequence and itself,
// so we exclude these from the calculation
int tile_start_x = meta_diagonal_idx * (BLOCKSZ * diags_per_thread) + exclusion_lower;
int tile_start_y = 0;
// x is the global column of the distance matrix
// y is the global row of the distance matrix
// localX, localY are the local coordinates of the thread position in the tile it is working on
int x = tile_start_x + threadIdx.x * diags_per_thread;
int y = 0;
// Each thread updates up to 4 diagonals (diags_per_thread) at once
T cov1, cov2, cov3, cov4;
const unsigned int num_diags = n_x - exclusion_upper;
// Load the first dot product values
if (x < n_x) {
cov1 = Cov[x];
}
if (x + 1 < n_x && diags_per_thread > 1) {
cov2 = Cov[x + 1];
}
if (x + 2 < n_x && diags_per_thread > 2) {
cov3 = Cov[x + 2];
}
if(x + 3 < n_x && diags_per_thread > 3) {
cov4 = Cov[x + 3];
}
/////////////////////////////////////
// Main loop
/////////////////////////////////////
// Each threadblock finds all the distances on a 'metadiagonal'
// We use a tiled approach for each thread block
// The tiles are horizontal slices of the diagonal, think of a parallelogram cut
// from a diagonal slice of the distance matrix
// Each thread starts on the first row and works its way down-right towards
// the right side of the distance matrix
while (tile_start_x < n_x && tile_start_y < n_y)
{
// Initialize the next tile's shared memory
initialize_tile_memory<T,tile_height,tile_width,full_join,only_col, BLOCKSZ>(profile_A, profile_B, dfa, dfb, dga, dgb,
normsa, normsb, local_mp_col, local_mp_row,
df_col, df_row, dg_col, dg_row, inorm_col, inorm_row,
n_x, n_y, tile_start_x, tile_start_y);
// Start of new tile, sync
__syncthreads();
// There are 2 pathways here: most of the time we take the fast path (top);
// the last block will take the slower path (bottom) as well as the fast path
if(tile_start_x + tile_width < n_x && tile_start_y + tile_height < n_y && start_diag + diags_per_thread - 1 < num_diags) {
for(int i = 0, j = threadIdx.x * diags_per_thread; i < tile_height; i+=diags_per_thread, j+=diags_per_thread) {
if(diags_per_thread == 4) {
do_iteration_unroll_4<T,T4,T2,full_join, only_col>(i,j,x + global_start_x + i,y + global_start_y + i, cov1,cov2,cov3,cov4,df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row);
} else if(diags_per_thread == 2) {
do_iteration_unroll_2<T,T2,full_join, only_col>(i,j,x + global_start_x + i,y + global_start_y + i, cov1,cov2,df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row);
}
}
x += tile_height;
y += tile_height;
} else if (start_diag < num_diags){
int localX = threadIdx.x * diags_per_thread;
int localY = 0;
while(x < n_x && y < n_y && localY < tile_height) {
if(diags_per_thread == 4) {
do_iteration_4diag<T,full_join, only_col>(localY,localX,x,y,global_start_x,global_start_y,n_x,cov1,cov2,cov3,cov4, df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row, start_diag, num_diags);
} else if(diags_per_thread == 2) {
do_iteration_2diag<T,full_join, only_col>(localY,localX,x,y,global_start_x,global_start_y,n_x,cov1,cov2,df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row, start_diag, num_diags);
}
++x;
++y;
++localX;
++localY;
}
}
// After this sync, the caches will be updated with the best so far values for this tile
__syncthreads();
int global_position, local_position;
if(full_join || only_col) {
// If we updated any values in the cached MP, try to push them to the global "master" MP
global_position = tile_start_x + threadIdx.x;
local_position = threadIdx.x;
while(local_position < tile_width && global_position < n_x) {
mp_entry e = local_mp_col[local_position];
MPatomicMax(profile_A + global_position, e.floats[0], e.ints[1]);
global_position += BLOCKSZ;
local_position += BLOCKSZ;
}
}
if(full_join || !only_col) {
global_position = tile_start_y + threadIdx.x;
local_position = threadIdx.x;
while(local_position < tile_height && global_position < n_y) {
mp_entry e = local_mp_row[local_position];
MPatomicMax(profile_B + global_position, e.floats[0], e.ints[1]);
global_position += BLOCKSZ;
local_position += BLOCKSZ;
}
}
// Update the tile position
tile_start_x += tile_height;
tile_start_y += tile_height;
// Make sure our updates were committed before we pull in the next tile
__threadfence_block();
}
}
int get_diags_per_thread(bool fp64, const hipDeviceProp_t &dev_prop) {
return 4;
}
int get_blocksz(bool fp64, const hipDeviceProp_t &dev_prop) {
if(fp64) {
return BLOCKSZ_DP;
} else {
return BLOCKSZ_SP;
}
}
template< class T >
int get_smem(int tile_height, bool fp64, bool full_join, bool only_column_join, const hipDeviceProp_t &dev_prop) {
int smem;
int diags_per_thread = get_diags_per_thread(fp64, dev_prop);
int blocksz = get_blocksz(fp64, dev_prop);
int tile_width = blocksz * diags_per_thread + tile_height;
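// Shared memory budget: three statistic arrays (df, dg, inorm) are cached for
// both the column (tile_width) and row (tile_height) dimensions, plus the
// matrix profile cache(s) selected by the join type below.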
smem = (tile_width + tile_height) * 3 * sizeof(T);
if(full_join) {
smem += (tile_width + tile_height) * sizeof(mp_entry);
} else if( only_column_join){
smem += tile_width * sizeof(mp_entry);
} else {
smem += tile_height * sizeof(mp_entry);
}
printf("Using %d KiB smem per block\n", smem / 1024);
return smem;
}
SCRIMPError_t kernel_ab_join_upper(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, size_t global_start_x, size_t global_start_y, const hipDeviceProp_t &props, bool fp64, bool full_join, hipStream_t s)
{
int diags_per_thread = get_diags_per_thread(fp64, props);
int blocksz = get_blocksz(fp64, props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
int num_workers = ceil(tile_width / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
if(full_join) {
// We can have an exclusion zone if this ab join is part of a larger self-join
int exclusion = window_size / 4;
if(global_y + global_start_y >= global_x + global_start_x && global_start_y + global_y <= global_start_x + global_x + exclusion) {
num_workers = ceil((tile_width - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
}else {
exclusion = 0;
}
if(tile_width <= exclusion) {
return SCRIMP_NO_ERROR;
}
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, true, props);
hipLaunchKernelGGL(( do_tile<double, double2, double4, true, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP>), dim3(grid),dim3(block),smem,s, QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, true, props);
hipLaunchKernelGGL(( do_tile<float, float2, float4, false, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP>), dim3(grid),dim3(block),smem,s, QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
}
} else {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, false, true, props);
hipLaunchKernelGGL(( do_tile<double, double2, double4, true, false, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP>), dim3(grid),dim3(block),smem,s, QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
0,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, false, true, props);
hipLaunchKernelGGL(( do_tile<float, float2, float4, false, false, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP>), dim3(grid),dim3(block),smem,s, QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
0,0);
}
}
hipError_t err = hipPeekAtLastError();
if(err != hipSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t kernel_ab_join_lower(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, size_t global_start_x, size_t global_start_y, const hipDeviceProp_t &props, bool fp64, bool full_join, hipStream_t s)
{
int diags_per_thread = get_diags_per_thread(fp64, props);
int blocksz = get_blocksz(fp64, props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
int num_workers = ceil(tile_height / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
if(full_join) {
// We can have an exclusion zone if this ab join is part of a larger self-join
int exclusion = window_size / 4;
if(global_y + global_start_y + tile_height >= global_x + global_start_x && global_y + global_start_y + tile_height <= global_x + global_start_x + exclusion) {
num_workers = ceil((tile_height - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
} else {
exclusion = 0;
}
if(tile_height <= exclusion) {
return SCRIMP_NO_ERROR;
}
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, true, props);
hipLaunchKernelGGL(( do_tile<double, double2, double4, true, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP>), dim3(grid),dim3(block),smem,s, QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,exclusion);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, true, props);
hipLaunchKernelGGL(( do_tile<float, float2, float4, false, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP>), dim3(grid),dim3(block),smem,s, QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,exclusion);
}
} else {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, false, false, props);
hipLaunchKernelGGL(( do_tile<double, double2, double4, true, false, false, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP>), dim3(grid),dim3(block),smem,s, QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, false, false, props);
hipLaunchKernelGGL(( do_tile<float, float2, float4, false, false, false, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP>), dim3(grid),dim3(block),smem,s, QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,0);
}
}
hipError_t err = hipPeekAtLastError();
if(err != hipSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t kernel_self_join_upper(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, const hipDeviceProp_t &props, bool fp64, hipStream_t s)
{
int exclusion = window_size / 4;
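// Subsequences within window_size / 4 of the diagonal are trivial matches;
// diagonals inside this exclusion zone are skipped for the self-join.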
int diags_per_thread = get_diags_per_thread(fp64,props);
int blocksz = get_blocksz(fp64,props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
if(global_y >= global_x && global_y <= global_x + exclusion) {
int num_workers = ceil((tile_width - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
} else {
int num_workers = ceil(tile_width / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
exclusion = 0;
}
if(exclusion < tile_width) {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, false, props);
hipLaunchKernelGGL(( do_tile<double, double2, double4, true, true,false, BLOCKSPERSM_SELF, 4, TILE_HEIGHT_DP, BLOCKSZ_DP>), dim3(grid),dim3(block),smem,s, QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, false, props);
hipLaunchKernelGGL(( do_tile<float, float2, float4, false, true,false, BLOCKSPERSM_SELF,4, TILE_HEIGHT, BLOCKSZ_SP>), dim3(grid),dim3(block),smem,s, QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
}
}
hipError_t err = hipPeekAtLastError();
if(err != hipSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t kernel_self_join_lower(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, const hipDeviceProp_t &props, bool fp64, hipStream_t s)
{
int exclusion = window_size / 4;
int diags_per_thread = get_diags_per_thread(fp64, props);
int blocksz = get_blocksz(fp64, props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
if(global_y + tile_height >= global_x && global_y + tile_height <= global_x + exclusion) {
int num_workers = ceil((tile_height - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
} else {
int num_workers = ceil(tile_height / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
exclusion = 0;
}
if(exclusion < tile_height) {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, false, props);
hipLaunchKernelGGL(( do_tile<double, double2,double4, true, true,false, BLOCKSPERSM_SELF, 4, TILE_HEIGHT_DP, BLOCKSZ_DP>), dim3(grid),dim3(block),smem,s, QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0, exclusion);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, false, props);
hipLaunchKernelGGL(( do_tile<float,float2,float4, false, true,false, BLOCKSPERSM_SELF, 4, TILE_HEIGHT, BLOCKSZ_SP>), dim3(grid),dim3(block),smem,s, QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0, exclusion);
}
}
hipError_t err = hipPeekAtLastError();
if(err != hipSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
} // namespace SCRIMP
| 48e8717a1739a33c137b530af5f3f2284e1649db.cu | #include "kernels.h"
namespace SCRIMP {
#define BLOCKSZ_SP 512
#define BLOCKSZ_DP 256
#define BLOCKSPERSM_SELF 2
#define BLOCKSPERSM_AB 2
#define TILE_HEIGHT 200
#define TILE_HEIGHT_DP 200
//Atomically updates the MP/idxs using a single 64-bit integer. We lose a small amount of precision in the output, if we do not do this we are unable
// to atomically update both the matrix profile and the indexes without using a critical section and dedicated locks.
__device__ inline void MPatomicMax(volatile unsigned long long int* address, float val, unsigned int idx)
{
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val){
loctest.ulong = atomicCAS((unsigned long long int*) address, loctest.ulong, loc.ulong);
}
}
// As above, but checks a previously read value before attempting another read
// This allows us to exploit vectorized loads of the matrix profile
__device__ inline void MPatomicMax_check(volatile unsigned long long int* __restrict__ address, float val, unsigned int idx, float curr_val)
{
if(val > curr_val) {
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val){
loctest.ulong = atomicCAS((unsigned long long int*) address, loctest.ulong, loc.ulong);
}
}
}
__device__ inline void MPMax(const float d1, const float d2, const unsigned int i1,
const unsigned int i2, float &outd, unsigned int &outi)
{
if(d1 >= d2) {
outd = d1;
outi = i1;
} else {
outd = d2;
outi = i2;
}
}
// Computes max(a,b) with index and stores the result in a
__device__ inline void MPMax2(float &d1, const float &d2, unsigned int &i1,
const unsigned int &i2)
{
if(d2 > d1) {
d1 = d2;
i1 = i2;
}
}
// Computes the max of 4 values in a float 4
__device__ inline float max4(const float4 &d, const unsigned int init, unsigned int &idx) {
float ret = d.x;
idx = init;
if(d.y > ret) {
ret = d.y;
idx = init + 1;
}
if(d.z > ret) {
ret = d.z;
idx = init + 2;
}
if(d.w > ret) {
ret = d.w;
idx = init + 3;
}
return ret;
}
template<class T, int tile_height, int tile_width, bool full_join, bool only_col, int BLOCKSZ>
__device__ inline void initialize_tile_memory(const unsigned long long int* __restrict__ profile_A,
const unsigned long long int* __restrict__ profile_B,
const double* __restrict__ df_A, const double* __restrict__ df_B,
const double* __restrict__ dg_A, const double* __restrict__ dg_B,
const double* __restrict__ norms_A, const double* __restrict__ norms_B,
mp_entry* __restrict__ local_mp_col, mp_entry* __restrict__ local_mp_row,
T* __restrict__ df_col, T* __restrict__ df_row, T* __restrict__ dg_col,
T* __restrict__ dg_row, T* __restrict__ norm_col, T* __restrict__ norm_row,
const unsigned int n_x, const unsigned int n_y, const unsigned int col_start,
const unsigned int row_start)
{
int global_position = col_start + threadIdx.x;
int local_position = threadIdx.x;
while(local_position < tile_width && global_position < n_x) {
dg_col[local_position] = dg_A[global_position];
df_col[local_position] = df_A[global_position];
norm_col[local_position] = norms_A[global_position];
if(full_join || only_col) {
local_mp_col[local_position].ulong = profile_A[global_position];
}
local_position += BLOCKSZ;
global_position += BLOCKSZ;
}
global_position = row_start + threadIdx.x;
local_position = threadIdx.x;
while(local_position < tile_height && global_position < n_y) {
dg_row[local_position] = dg_B[global_position];
df_row[local_position] = df_B[global_position];
norm_row[local_position] = norms_B[global_position];
if(full_join || !only_col) {
local_mp_row[local_position].ulong = profile_B[global_position];
}
local_position += BLOCKSZ;
global_position += BLOCKSZ;
}
}
// This does one row of work for 2 diagonals in a single thread
template<class T, bool full_join, bool only_col>
__device__ inline void do_unrolled_row2(T &cov1, T &cov2, float &distcol1, float &distcol2, unsigned int &idxcol1,
unsigned int &idxcol2, const T &inormcx, const T &inormcy, const T &inormr,
const T &df_colx, const T &df_coly, const T &dg_colx, const T &dg_coly,
const T &df_row, const T &dg_row, const int &row, const int &col,
const int &global_row, const int &global_col, mp_entry* __restrict__ mp_row,
const float &curr_val) {
float2 dist;
// Compute the row's distances
dist.x = cov1 * inormcx * inormr;
dist.y = cov2 * inormcy * inormr;
// Compute the next covariance values
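// Each thread walks down a diagonal, so this incremental update produces the
// covariance for (row + 1, col + 1) from the value at (row, col) using the
// precomputed difference terms df/dg.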
cov1 = cov1 + df_colx * dg_row + dg_colx * df_row;
cov2 = cov2 + df_coly * dg_row + dg_coly * df_row;
// Update the column best-so-far values
if(full_join || only_col) {
MPMax2(distcol1, dist.x, idxcol1, global_row);
MPMax2(distcol2, dist.y, idxcol2, global_row);
}
if(full_join || !only_col) {
unsigned int idx = global_col;
// We take the maximum of the columns we computed for the row
// And use that value to check the matrix profile
MPMax2(dist.x, dist.y, idx, global_col + 1);
MPatomicMax_check((unsigned long long*) (mp_row + row), dist.x, idx, curr_val);
}
}
// This does one row of work for 4 diagonals in a single thread
template<class T, bool full_join, bool only_col>
__device__ inline void do_unrolled_row4(T &cov1, T &cov2, T &cov3, T &cov4,
float &distcol1, float &distcol2, float &distcol3,
float &distcol4, unsigned int &idxcol1,
unsigned int &idxcol2, unsigned int &idxcol3, unsigned int &idxcol4,
const T &inormcx, const T &inormcy, const T &inormcz,
const T &inormcw, const T &inormr,
const T &df_colx, const T &df_coly, const T &df_colz,
const T &df_colw, const T &dg_colx, const T &dg_coly,
const T &dg_colz, const T &dg_colw, const T &df_row,
const T &dg_row, const int &row, const int &col,
const int &global_row, const int &global_col,
mp_entry* __restrict__ mp_row, const float &curr_val) {
float4 dist;
// Compute the row's distances
dist.x = cov1 * inormcx * inormr;
dist.y = cov2 * inormcy * inormr;
dist.z = cov3 * inormcz * inormr;
dist.w = cov4 * inormcw * inormr;
// Compute the next covariance values
cov1 = cov1 + df_colx * dg_row + dg_colx * df_row;
cov2 = cov2 + df_coly * dg_row + dg_coly * df_row;
cov3 = cov3 + df_colz * dg_row + dg_colz * df_row;
cov4 = cov4 + df_colw * dg_row + dg_colw * df_row;
// Update the column best-so-far values
if(full_join || only_col) {
MPMax2(distcol1, dist.x, idxcol1, global_row);
MPMax2(distcol2, dist.y, idxcol2, global_row);
MPMax2(distcol3, dist.z, idxcol3, global_row);
MPMax2(distcol4, dist.w, idxcol4, global_row);
}
if(full_join || !only_col) {
unsigned int idx;
// We take the maximum of the columns we computed for the row
// And use that value to check the matrix profile
float d = max4(dist, global_col, idx);
MPatomicMax_check((unsigned long long*) (mp_row + row), d, idx, curr_val);
}
}
// Processes 2 iterations of the inner loop. Each thread computes 2 distances per iteration (x,y), (x+1,y)
// This function assumes that the edge cases that occur on the edge of the distance matrix are not present. This is the faster path,
// with less conditional branching.
template<class T, class VT2, bool full_join, bool only_col>
__device__ inline void do_iteration_unroll_2(int i, int j, int x, int y, T &cov1, T &cov2,
T* __restrict__ df_col, T* __restrict__ df_row,
T* __restrict__ dg_col, T* __restrict__ dg_row,
T* __restrict__ inorm_col, T* __restrict__ inorm_row,
mp_entry* __restrict__ local_mp_col, mp_entry* __restrict__ local_mp_row)
{
float2 distc = make_float2(CC_MIN, CC_MIN);
float2 distc2 = make_float2(CC_MIN, CC_MIN);
uint2 idxc,idxc2;
// Load row and column values 2 at a time
int r = i >> 1;
int c = j >> 1;
// Preload the shared memory values we will use into registers
// We load 2 values per vectorized load into a VT2 vector type
VT2 dfc = reinterpret_cast<VT2*>(df_col)[c];
VT2 dgc = reinterpret_cast<VT2*>(dg_col)[c];
VT2 inormc = reinterpret_cast<VT2*>(inorm_col)[c];
VT2 dfc2 = reinterpret_cast<VT2*>(df_col)[c+1];
VT2 dgc2 = reinterpret_cast<VT2*>(dg_col)[c+1];
VT2 inormc2 = reinterpret_cast<VT2*>(inorm_col)[c+1];
ulonglong2 mp_col_check1, mp_col_check2;
ulonglong2 mp_row_check;
// Copy the pieces of the cache we will use into registers with vectorized loads
if(full_join || only_col) {
mp_col_check1 = reinterpret_cast<ulonglong2*>(local_mp_col)[c];
}
if(full_join || !only_col) {
mp_row_check = reinterpret_cast<ulonglong2*>(local_mp_row)[r];
}
VT2 dgr = reinterpret_cast<VT2*>(dg_row)[r];
VT2 dfr = reinterpret_cast<VT2*>(df_row)[r];
VT2 inormr = reinterpret_cast<VT2*>(inorm_row)[r];
// Do rows one at a time:
// We are computing a tile that looks like this:
// C:1 2 3
//R1 X X
//R2 X X
// For 2 diagonals unrolled 2 times we compute a total of 4 distances.
// These distances cover 2 possible rows and 3 possible columns, so we need to check the matrix profile
// 5 times total, once for each row and once for each column
mp_entry e;
e.ulong = mp_row_check.x;
do_unrolled_row2<T, full_join,only_col>(cov1, cov2, distc.x, distc.y, idxc.x, idxc.y,
inormc.x, inormc.y, inormr.x, dfc.x, dfc.y,
dgc.x, dgc.y, dfr.x, dgr.x, i, j, y, x, local_mp_row, e.floats[0]);
// Each row's computation allows us to complete a column, the first row completes column 1
if(full_join || only_col) {
e.ulong = mp_col_check1.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j), distc.x, idxc.x, e.floats[0]);
}
e.ulong = mp_row_check.y;
do_unrolled_row2<T,full_join, only_col>(cov1, cov2, distc.y, distc2.x, idxc.y,
idxc2.x, inormc.y, inormc2.x, inormr.y,
dfc.y, dfc2.x, dgc.y, dgc2.x, dfr.y, dgr.y,
i + 1, j + 1, y + 1, x + 1, local_mp_row, e.floats[0]);
// The second row completes column 2 and 3
if(full_join || only_col) {
e.ulong = mp_col_check1.y;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 1), distc.y, idxc.y, e.floats[0]);
mp_col_check2 = reinterpret_cast<ulonglong2*>(local_mp_col)[c+1];
e.ulong = mp_col_check2.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 2), distc2.x, idxc2.x, e.floats[0]);
}
}
// Processes 4 iterations of the inner loop. Each thread computes 4 distances per iteration (x,y), (x+1,y), (x+2,y), and (x+3,y)
// This function assumes that the edge cases that occur on the edge of the distance matrix are not present. This is the faster path,
// with less conditional branching.
template<class T, class VT4, class VT2, bool full_join, bool only_col>
__device__ inline void do_iteration_unroll_4(int i, int j, int x, int y, T &cov1, T &cov2, T &cov3,
T &cov4, T* __restrict__ df_col, T* __restrict__ df_row,
T* __restrict__ dg_col, T* __restrict__ dg_row,
T* __restrict__ inorm_col, T* __restrict__ inorm_row,
mp_entry* __restrict__ local_mp_col, mp_entry* __restrict__ local_mp_row)
{
float4 distc = make_float4(CC_MIN, CC_MIN, CC_MIN, CC_MIN);
float4 distc2 = make_float4(CC_MIN, CC_MIN, CC_MIN, CC_MIN);
uint4 idxc,idxc2;
// Load row values 2 at a time, load column values 4 at a time
int r = i >> 1;
int c = j >> 2;
int c2 = j >> 1;
// Preload the shared memory values we will use into registers
// We load 4 values per thread into a float4 vector type
VT4 dfc = reinterpret_cast<VT4*>(df_col)[c];
VT4 dgc = reinterpret_cast<VT4*>(dg_col)[c];
VT4 inormc = (reinterpret_cast<VT4*>(inorm_col)[c]);
VT4 dfc2 = reinterpret_cast<VT4*>(df_col)[c+1];
VT4 dgc2 = reinterpret_cast<VT4*>(dg_col)[c+1];
VT4 inormc2 = reinterpret_cast<VT4*>(inorm_col)[c+1];
ulonglong2 mp_col_check1, mp_col_check2;
ulonglong2 mp_row_check;
// Copy the pieces of the cache we will use into registers with vectorized loads
if(full_join || only_col) {
mp_col_check1 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2];
}
if(full_join || !only_col) {
mp_row_check = reinterpret_cast<ulonglong2*>(local_mp_row)[r];
}
// Due to a lack of registers on Volta, we only load these row values 2 at a time
VT2 dgr = reinterpret_cast<VT2*>(dg_row)[r];
VT2 dfr = reinterpret_cast<VT2*>(df_row)[r];
VT2 inormr = reinterpret_cast<VT2*>(inorm_row)[r];
// Do rows one at a time:
// We are computing a tile that looks like this:
// C:1 2 3 4 5 6 7
//R1 X X X X
//R2 X X X X
//R3 X X X X
//R4 X X X X
// For 4 diagonals unrolled 4 times we compute a total of 16 distances.
// These distances cover 4 possible rows and 7 possible columns, so we need to check the matrix profile
// 11 times total, once for each row and once for each column
mp_entry e;
e.ulong = mp_row_check.x;
do_unrolled_row4<T, full_join,only_col>(cov1, cov2, cov3, cov4, distc.x, distc.y, distc.z, distc.w,
idxc.x, idxc.y, idxc.z, idxc.w, inormc.x, inormc.y, inormc.z,
inormc.w, inormr.x, dfc.x, dfc.y, dfc.z, dfc.w, dgc.x, dgc.y,
dgc.z, dgc.w, dfr.x, dgr.x, i, j, y, x, local_mp_row, e.floats[0]);
// Each row's computation allows us to complete a column, the first row completes column 1
if(full_join || only_col) {
e.ulong = mp_col_check1.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j), distc.x, idxc.x, e.floats[0]);
}
e.ulong = mp_row_check.y;
do_unrolled_row4<T,full_join, only_col>(cov1, cov2, cov3, cov4, distc.y, distc.z, distc.w, distc2.x,
idxc.y, idxc.z, idxc.w, idxc2.x, inormc.y, inormc.z, inormc.w,
inormc2.x, inormr.y, dfc.y, dfc.z, dfc.w, dfc2.x, dgc.y, dgc.z,
dgc.w, dgc2.x, dfr.y, dgr.y, i + 1, j + 1, y + 1, x + 1,
local_mp_row, e.floats[0]);
// The second row completes column 2
if(full_join || only_col) {
e.ulong = mp_col_check1.y;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 1), distc.y, idxc.y, e.floats[0]);
}
// Load the values for the next 2 rows
dgr = reinterpret_cast<VT2*>(dg_row)[r + 1];
dfr = reinterpret_cast<VT2*>(df_row)[r + 1];
inormr = reinterpret_cast<VT2*>(inorm_row)[r + 1];
if(full_join || !only_col) {
mp_row_check = reinterpret_cast<ulonglong2*>(local_mp_row)[r + 1];
}
e.ulong = mp_row_check.x;
do_unrolled_row4<T,full_join,only_col>(cov1, cov2, cov3, cov4, distc.z, distc.w, distc2.x, distc2.y,
idxc.z, idxc.w, idxc2.x, idxc2.y, inormc.z, inormc.w, inormc2.x,
inormc2.y, inormr.x, dfc.z, dfc.w, dfc2.x, dfc2.y, dgc.z, dgc.w,
dgc2.x, dgc2.y, dfr.x, dgr.x, i + 2, j + 2, y + 2, x + 2,
local_mp_row, e.floats[0]);
// The third row completes column 3
if(full_join || only_col) {
mp_col_check2 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2 + 1];
e.ulong = mp_col_check2.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 2), distc.z, idxc.z, e.floats[0]);
}
e.ulong = mp_row_check.y;
do_unrolled_row4<T,full_join,only_col>(cov1, cov2, cov3, cov4, distc.w, distc2.x, distc2.y, distc2.z,
idxc.w, idxc2.x, idxc2.y, idxc2.z, inormc.w, inormc2.x, inormc2.y,
inormc2.z, inormr.y, dfc.w, dfc2.x, dfc2.y, dfc2.z, dgc.w, dgc2.x,
dgc2.y, dgc2.z, dfr.y, dgr.y, i + 3, j + 3, y + 3, x + 3,
local_mp_row, e.floats[0]);
// After the 4th row, we have completed columns 4, 5, 6, and 7
if(full_join || only_col) {
e.ulong = mp_col_check2.y;
mp_col_check1 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2+2];
mp_col_check2 = reinterpret_cast<ulonglong2*>(local_mp_col)[c2+3];
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 3), distc.w, idxc.w, e.floats[0]);
e.ulong = mp_col_check1.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 4), distc2.x, idxc2.x, e.floats[0]);
e.ulong = mp_col_check1.y;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 5), distc2.y, idxc2.y, e.floats[0]);
e.ulong = mp_col_check2.x;
MPatomicMax_check((unsigned long long*) (local_mp_col + j + 6), distc2.z, idxc2.z, e.floats[0]);
}
}
// Does a single iteration of the inner loop on 2 diagonals per thread, not unrolled
// Checks for the boundary case where only 1 diagonal can be updated
template<class T, bool full_join, bool only_col>
__device__ inline void do_iteration_2diag(int i, int j, int x, int y,
size_t global_start_x, size_t global_start_y,
int n, T &cov1, T &cov2, T* __restrict__ df_col,
T* __restrict__ df_row, T* __restrict__ dg_col,
T* __restrict__ dg_row, T* __restrict__ inorm_col,
T* __restrict__ inorm_row, mp_entry* __restrict__ local_mp_col,
mp_entry* __restrict__ local_mp_row, size_t diag, size_t num_diags)
{
float dist_1;
unsigned int idx_1;
float2 dist;
// Compute the next set of distances (row y)
dist.x = cov1 * inorm_col[j] * inorm_row[i];
dist.y = cov2 * inorm_col[j + 1] * inorm_row[i];
// Update cov and compute the next distance values (row y)
cov1 = cov1 + df_col[j] * dg_row[i] + dg_col[j] * df_row[i];
cov2 = cov2 + df_col[j+1] * dg_row[i] + dg_col[j+1] * df_row[i];
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j), dist.x, y + global_start_y);
}
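// dist_1/idx_1 accumulate the best distance across the diagonals handled by
// this thread so the row profile is updated with a single atomic below.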
dist_1 = dist.x;
idx_1 = x + global_start_x;
if(x + 1 < n && diag + 1 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.y, idx_1, global_start_x + x + 1, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 1), dist.y, y + global_start_y);
}
}
if(full_join || !only_col) {
MPatomicMax((unsigned long long*) (local_mp_row + i), dist_1, idx_1);
}
}
// Does a single iteration of the inner loop on 4 diagonals per thread, not unrolled
// Checks for the boundary case where only 1, 2, or 3 diagonals can be updated
template<class T, bool full_join, bool only_col>
__device__ inline void do_iteration_4diag(int i, int j, int x, int y,
size_t global_start_x, size_t global_start_y,
int n, T &cov1, T &cov2,
T &cov3, T &cov4, T* __restrict__ df_col,
T* __restrict__ df_row, T* __restrict__ dg_col,
T* __restrict__ dg_row, T* __restrict__ inorm_col,
T* __restrict__ inorm_row, mp_entry* __restrict__ local_mp_col,
mp_entry* __restrict__ local_mp_row, size_t diag, size_t num_diags)
{
float dist_1;
unsigned int idx_1;
float4 dist;
// Compute the next set of distances (row y)
dist.x = static_cast<float>(cov1) * inorm_col[j] * inorm_row[i];
dist.y = static_cast<float>(cov2) * inorm_col[j + 1] * inorm_row[i];
dist.z = static_cast<float>(cov3) * inorm_col[j + 2] * inorm_row[i];
dist.w = static_cast<float>(cov4) * inorm_col[j + 3] * inorm_row[i];
// Update cov and compute the next distance values (row y)
cov1 = cov1 + df_col[j] * dg_row[i] + dg_col[j] * df_row[i];
cov2 = cov2 + df_col[j+1] * dg_row[i] + dg_col[j+1] * df_row[i];
cov3 = cov3 + df_col[j+2] * dg_row[i] + dg_col[j + 2] * df_row[i];
cov4 = cov4 + df_col[j+3] * dg_row[i] + dg_col[j + 3] * df_row[i];
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j), dist.x, y + global_start_y);
}
dist_1 = dist.x;
idx_1 = x + global_start_x;
if(x + 1 < n && diag + 1 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.y, idx_1, global_start_x + x + 1, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 1), dist.y, y + global_start_y);
}
}
if(x + 2 < n && diag + 2 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.z, idx_1, global_start_x + x + 2, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 2), dist.z, y + global_start_y);
}
}
if(x + 3 < n && diag + 3 < num_diags) {
if(full_join || !only_col) {
MPMax(dist_1, dist.w, idx_1, global_start_x + x + 3, dist_1, idx_1);
}
if(full_join || only_col) {
MPatomicMax((unsigned long long*) (local_mp_col + j + 3), dist.w, y + global_start_y);
}
}
if(full_join || !only_col) {
MPatomicMax((unsigned long long*) (local_mp_row + i), dist_1, idx_1);
}
}
// Computes the matrix profile given the sliding dot products for the first query and the precomputed data statistics
template<class T, class T2, class T4, bool fp64, bool full_join, bool only_col, int blocks_per_sm, int diags_per_thread, int tile_height, int BLOCKSZ>
__global__ void __launch_bounds__(BLOCKSZ, blocks_per_sm)
do_tile(const double* __restrict__ Cov, const double* __restrict__ dfa,
const double* __restrict__ dfb, const double* __restrict__ dga,
const double* __restrict__ dgb, const double* __restrict__ normsa,
const double* __restrict__ normsb,
unsigned long long* __restrict__ profile_A,
unsigned long long* __restrict__ profile_B,
const unsigned int m, const unsigned int n_x, const unsigned int n_y,
const unsigned int global_start_x, const unsigned int global_start_y,
const int exclusion_lower, const int exclusion_upper)
{
// tile_height must be a multiple of 4
// Tuned for V100
const int tile_width = tile_height + BLOCKSZ * diags_per_thread;
extern __shared__ char smem[];
mp_entry *local_mp_col, *local_mp_row;
T *df_col, *dg_col, *inorm_col, *df_row, *dg_row, *inorm_row;
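// Carve the dynamic shared memory into the per-tile caches:
// [df_col | dg_col | inorm_col] (tile_width elements each),
// [df_row | dg_row | inorm_row] (tile_height elements each),
// followed by the cached matrix profile entries for the column and/or row,
// depending on the join type selected by the template parameters.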
df_col = (T*) smem;
dg_col = df_col + tile_width;
inorm_col = dg_col + tile_width;
df_row = inorm_col + tile_width;
dg_row = df_row + tile_height;
inorm_row = dg_row + tile_height;
mp_entry *pos = (mp_entry*) (inorm_row + tile_height);
if(!full_join && only_col) {
local_mp_col = pos;
} else if(!full_join) {
local_mp_row = pos;
} else {
local_mp_col = pos;
local_mp_row = pos + tile_width;
}
const unsigned int start_diag = (threadIdx.x * diags_per_thread) + blockIdx.x * (blockDim.x * diags_per_thread);
// This is the index of the meta-diagonal that this thread block will work on
const unsigned int meta_diagonal_idx = blockIdx.x;
// The first threads would actually compute the trivial match between a subsequence and itself,
// so we exclude these from the calculation
int tile_start_x = meta_diagonal_idx * (BLOCKSZ * diags_per_thread) + exclusion_lower;
int tile_start_y = 0;
// x is the global column of the distance matrix
// y is the global row of the distance matrix
// localX, localY are the local coordinates of the thread position in the tile it is working on
int x = tile_start_x + threadIdx.x * diags_per_thread;
int y = 0;
// Each thread updates up to 4 diagonals (diags_per_thread) at once
T cov1, cov2, cov3, cov4;
const unsigned int num_diags = n_x - exclusion_upper;
// Load the first dot product values
if (x < n_x) {
cov1 = Cov[x];
}
if (x + 1 < n_x && diags_per_thread > 1) {
cov2 = Cov[x + 1];
}
if (x + 2 < n_x && diags_per_thread > 2) {
cov3 = Cov[x + 2];
}
if(x + 3 < n_x && diags_per_thread > 3) {
cov4 = Cov[x + 3];
}
/////////////////////////////////////
// Main loop
/////////////////////////////////////
// Each threadblock finds all the distances on a 'metadiagonal'
// We use a tiled approach for each thread block
// The tiles are horizontal slices of the diagonal, think of a parallelogram cut
// from a diagonal slice of the distance matrix
// Each thread starts on the first row and works its way down-right towards
// the right side of the distance matrix
while (tile_start_x < n_x && tile_start_y < n_y)
{
// Initialize the next tile's shared memory
initialize_tile_memory<T,tile_height,tile_width,full_join,only_col, BLOCKSZ>(profile_A, profile_B, dfa, dfb, dga, dgb,
normsa, normsb, local_mp_col, local_mp_row,
df_col, df_row, dg_col, dg_row, inorm_col, inorm_row,
n_x, n_y, tile_start_x, tile_start_y);
// Start of new tile, sync
__syncthreads();
// There are 2 pathways here: most of the time we take the fast path (top);
// the last block will take the slower path (bottom) as well as the fast path
if(tile_start_x + tile_width < n_x && tile_start_y + tile_height < n_y && start_diag + diags_per_thread - 1 < num_diags) {
for(int i = 0, j = threadIdx.x * diags_per_thread; i < tile_height; i+=diags_per_thread, j+=diags_per_thread) {
if(diags_per_thread == 4) {
do_iteration_unroll_4<T,T4,T2,full_join, only_col>(i,j,x + global_start_x + i,y + global_start_y + i, cov1,cov2,cov3,cov4,df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row);
} else if(diags_per_thread == 2) {
do_iteration_unroll_2<T,T2,full_join, only_col>(i,j,x + global_start_x + i,y + global_start_y + i, cov1,cov2,df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row);
}
}
x += tile_height;
y += tile_height;
} else if (start_diag < num_diags){
int localX = threadIdx.x * diags_per_thread;
int localY = 0;
while(x < n_x && y < n_y && localY < tile_height) {
if(diags_per_thread == 4) {
do_iteration_4diag<T,full_join, only_col>(localY,localX,x,y,global_start_x,global_start_y,n_x,cov1,cov2,cov3,cov4, df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row, start_diag, num_diags);
} else if(diags_per_thread == 2) {
do_iteration_2diag<T,full_join, only_col>(localY,localX,x,y,global_start_x,global_start_y,n_x,cov1,cov2,df_col, df_row, dg_col, dg_row, inorm_col, inorm_row, local_mp_col, local_mp_row, start_diag, num_diags);
}
++x;
++y;
++localX;
++localY;
}
}
// After this sync, the caches will be updated with the best so far values for this tile
__syncthreads();
int global_position, local_position;
if(full_join || only_col) {
// If we updated any values in the cached MP, try to push them to the global "master" MP
global_position = tile_start_x + threadIdx.x;
local_position = threadIdx.x;
while(local_position < tile_width && global_position < n_x) {
mp_entry e = local_mp_col[local_position];
MPatomicMax(profile_A + global_position, e.floats[0], e.ints[1]);
global_position += BLOCKSZ;
local_position += BLOCKSZ;
}
}
if(full_join || !only_col) {
global_position = tile_start_y + threadIdx.x;
local_position = threadIdx.x;
while(local_position < tile_height && global_position < n_y) {
mp_entry e = local_mp_row[local_position];
MPatomicMax(profile_B + global_position, e.floats[0], e.ints[1]);
global_position += BLOCKSZ;
local_position += BLOCKSZ;
}
}
// Update the tile position
tile_start_x += tile_height;
tile_start_y += tile_height;
// Make sure our updates were committed before we pull in the next tile
__threadfence_block();
}
}
int get_diags_per_thread(bool fp64, const cudaDeviceProp &dev_prop) {
return 4;
}
int get_blocksz(bool fp64, const cudaDeviceProp &dev_prop) {
if(fp64) {
return BLOCKSZ_DP;
} else {
return BLOCKSZ_SP;
}
}
template< class T >
int get_smem(int tile_height, bool fp64, bool full_join, bool only_column_join, const cudaDeviceProp &dev_prop) {
int smem;
int diags_per_thread = get_diags_per_thread(fp64, dev_prop);
int blocksz = get_blocksz(fp64, dev_prop);
int tile_width = blocksz * diags_per_thread + tile_height;
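// Shared memory budget: three statistic arrays (df, dg, inorm) are cached for
// both the column (tile_width) and row (tile_height) dimensions, plus the
// matrix profile cache(s) selected by the join type below.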
smem = (tile_width + tile_height) * 3 * sizeof(T);
if(full_join) {
smem += (tile_width + tile_height) * sizeof(mp_entry);
} else if( only_column_join){
smem += tile_width * sizeof(mp_entry);
} else {
smem += tile_height * sizeof(mp_entry);
}
printf("Using %d KiB smem per block\n", smem / 1024);
return smem;
}
SCRIMPError_t kernel_ab_join_upper(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, size_t global_start_x, size_t global_start_y, const cudaDeviceProp &props, bool fp64, bool full_join, cudaStream_t s)
{
int diags_per_thread = get_diags_per_thread(fp64, props);
int blocksz = get_blocksz(fp64, props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
int num_workers = ceil(tile_width / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
if(full_join) {
// We can have an exclusion zone if this ab join is part of a larger self-join
int exclusion = window_size / 4;
if(global_y + global_start_y >= global_x + global_start_x && global_start_y + global_y <= global_start_x + global_x + exclusion) {
num_workers = ceil((tile_width - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
}else {
exclusion = 0;
}
if(tile_width <= exclusion) {
return SCRIMP_NO_ERROR;
}
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, true, props);
do_tile<double, double2, double4, true, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP><<<grid,block,smem,s>>>(QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, true, props);
do_tile<float, float2, float4, false, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP><<<grid,block,smem,s>>>(QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
}
} else {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, false, true, props);
do_tile<double, double2, double4, true, false, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP><<<grid,block,smem,s>>>(QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
0,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, false, true, props);
do_tile<float, float2, float4, false, false, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP><<<grid,block,smem,s>>>(QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
0,0);
}
}
cudaError_t err = cudaPeekAtLastError();
if(err != cudaSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
SCRIMPError_t kernel_ab_join_lower(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, size_t global_start_x, size_t global_start_y, const cudaDeviceProp &props, bool fp64, bool full_join, cudaStream_t s)
{
int diags_per_thread = get_diags_per_thread(fp64, props);
int blocksz = get_blocksz(fp64, props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
int num_workers = ceil(tile_height / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
if(full_join) {
// We can have an exclusion zone if this ab join is part of a larger self-join
int exclusion = window_size / 4;
if(global_y + global_start_y + tile_height >= global_x + global_start_x && global_y + global_start_y + tile_height <= global_x + global_start_x + exclusion) {
num_workers = ceil((tile_height - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
} else {
exclusion = 0;
}
if(tile_height <= exclusion) {
return SCRIMP_NO_ERROR;
}
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, true, props);
do_tile<double, double2, double4, true, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP><<<grid,block,smem,s>>>(QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,exclusion);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, true, props);
do_tile<float, float2, float4, false, true, true, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP><<<grid,block,smem,s>>>(QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,exclusion);
}
} else {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, false, false, props);
do_tile<double, double2, double4, true, false, false, BLOCKSPERSM_AB, 4, TILE_HEIGHT_DP, BLOCKSZ_DP><<<grid,block,smem,s>>>(QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, false, false, props);
do_tile<float, float2, float4, false, false, false, BLOCKSPERSM_AB, 4, TILE_HEIGHT, BLOCKSZ_SP><<<grid,block,smem,s>>>(QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0,0);
}
}
cudaError_t err = cudaPeekAtLastError();
if(err != cudaSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
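// Launches do_tile on the upper-triangular portion of a self-join tile. Tiles that overlap the
// exclusion zone (window_size / 4) around the main diagonal launch fewer workers, and tiles
// that fall entirely inside it are skipped.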
SCRIMPError_t kernel_self_join_upper(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, const cudaDeviceProp &props, bool fp64, cudaStream_t s)
{
int exclusion = window_size / 4;
int diags_per_thread = get_diags_per_thread(fp64,props);
int blocksz = get_blocksz(fp64,props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
if(global_y >= global_x && global_y <= global_x + exclusion) {
int num_workers = ceil((tile_width - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
} else {
int num_workers = ceil(tile_width / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
exclusion = 0;
}
if(exclusion < tile_width) {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, false, props);
do_tile<double, double2, double4, true, true,false, BLOCKSPERSM_SELF, 4, TILE_HEIGHT_DP, BLOCKSZ_DP><<<grid,block,smem,s>>>(QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, false, props);
do_tile<float, float2, float4, false, true,false, BLOCKSPERSM_SELF,4, TILE_HEIGHT, BLOCKSZ_SP><<<grid,block,smem,s>>>(QT,df_A,df_B,dg_A,dg_B,norms_A,norms_B,profile_A, profile_B,
window_size, tile_width, tile_height, global_x, global_y,
exclusion,0);
}
}
cudaError_t err = cudaPeekAtLastError();
if(err != cudaSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
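// Lower-triangular counterpart of kernel_self_join_upper: the inputs are swapped and the tile
// transposed so the same kernel can be reused.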
SCRIMPError_t kernel_self_join_lower(const double *QT, const double *timeseries_A, const double *timeseries_B, const double *df_A, const double *df_B, const double *dg_A, const double *dg_B, const double *norms_A, const double *norms_B, unsigned long long int *profile_A, unsigned long long int *profile_B, size_t window_size, size_t tile_width, size_t tile_height, size_t global_x, size_t global_y, const cudaDeviceProp &props, bool fp64, cudaStream_t s)
{
int exclusion = window_size / 4;
int diags_per_thread = get_diags_per_thread(fp64, props);
int blocksz = get_blocksz(fp64, props);
dim3 grid(1,1,1);
dim3 block(blocksz, 1, 1);
if(global_y + tile_height >= global_x && global_y + tile_height <= global_x + exclusion) {
int num_workers = ceil((tile_height - exclusion) / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
} else {
int num_workers = ceil(tile_height / (float) diags_per_thread);
grid.x = ceil(num_workers / (double) blocksz);
exclusion = 0;
}
if(exclusion < tile_height) {
if(fp64) {
int smem = get_smem<double>(TILE_HEIGHT_DP, fp64, true, false, props);
do_tile<double, double2,double4, true, true,false, BLOCKSPERSM_SELF, 4, TILE_HEIGHT_DP, BLOCKSZ_DP><<<grid,block,smem,s>>>(QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0, exclusion);
} else {
int smem = get_smem<float>(TILE_HEIGHT, fp64, true, false, props);
do_tile<float,float2,float4, false, true,false, BLOCKSPERSM_SELF, 4, TILE_HEIGHT, BLOCKSZ_SP><<<grid,block,smem,s>>>(QT,df_B,df_A,dg_B,dg_A,norms_B,norms_A,profile_B, profile_A,
window_size, tile_height, tile_width, global_y, global_x,
0, exclusion);
}
}
cudaError_t err = cudaPeekAtLastError();
if(err != cudaSuccess) {
return SCRIMP_CUDA_ERROR;
}
return SCRIMP_NO_ERROR;
}
} // namespace SCRIMP
|
90b5471ca23819cf029f448bc315275c09a24d30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/common_cu.h"
#include "initializers/step.h"
#include "nodes/variable.h"
__global__
void StepFillKernel(const int n, float *out, const float min, const float step)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) out[i] = min + i * step;
}
Step::Step(deepflow::InitParam *param) : Initializer(param) {
LOG_IF(FATAL, param->has_step_param() == false) << "param.has_step_param() == false";
}
void Step::init() {
}
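// Emits the df.step(...) call that would recreate this initializer through the C++ API.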
std::string Step::to_cpp() const
{
std::string cpp = "df.step(";
cpp += "{" + std::to_string(_dims[0]) + ", " + std::to_string(_dims[1]) + ", " + std::to_string(_dims[2]) + ", " + std::to_string(_dims[3]) + "}, ";
float min = _param->step_param().min();
float max = _param->step_param().max();
LOG_IF(FATAL, max < min) << "max < min";
cpp += std::to_string(min) + ", ";
cpp += std::to_string(max);
cpp += ")";
return cpp;
}
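// Fills every output of the node with evenly spaced values starting at min, using a step of
// (max - min) / size.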
void Step::apply(Node *node) {
int size = node->output(0)->value()->size();
float min = _param->step_param().min();
float max = _param->step_param().max();
for (auto output : node->outputs()) {
hipLaunchKernelGGL(StepFillKernel, numOfBlocks(size), maxThreadsPerBlock, 0, 0, size, (float*)output->value()->gpu_data(), min, (max - min) / size);
DF_KERNEL_CHECK();
}
} | 90b5471ca23819cf029f448bc315275c09a24d30.cu | #include "core/common_cu.h"
#include "initializers/step.h"
#include "nodes/variable.h"
__global__
void StepFillKernel(const int n, float *out, const float min, const float step)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) out[i] = min + i * step;
}
Step::Step(deepflow::InitParam *param) : Initializer(param) {
LOG_IF(FATAL, param->has_step_param() == false) << "param.has_step_param() == false";
}
void Step::init() {
}
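// Emits the df.step(...) call that would recreate this initializer through the C++ API.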
std::string Step::to_cpp() const
{
std::string cpp = "df.step(";
cpp += "{" + std::to_string(_dims[0]) + ", " + std::to_string(_dims[1]) + ", " + std::to_string(_dims[2]) + ", " + std::to_string(_dims[3]) + "}, ";
float min = _param->step_param().min();
float max = _param->step_param().max();
LOG_IF(FATAL, max < min) << "max < min";
cpp += std::to_string(min) + ", ";
cpp += std::to_string(max);
cpp += ")";
return cpp;
}
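// Fills every output of the node with evenly spaced values starting at min, using a step of
// (max - min) / size.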
void Step::apply(Node *node) {
int size = node->output(0)->value()->size();
float min = _param->step_param().min();
float max = _param->step_param().max();
for (auto output : node->outputs()) {
StepFillKernel << <numOfBlocks(size), maxThreadsPerBlock >> > (size, (float*)output->value()->gpu_data(), min, (max - min) / size);
DF_KERNEL_CHECK();
}
} |