Dataset columns (per-column string lengths):
    hip_filename    string, length 5 to 84
    hip_content     string, length 79 to 9.69M
    cuda_filename   string, length 4 to 83
    cuda_content    string, length 19 to 9.69M
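Each row pairs a HIP source file produced by hipify (hip_filename, hip_content) with its CUDA counterpart (cuda_filename, cuda_content), as the sample rows below show. As a minimal, purely illustrative sketch (the struct name and the idea of holding a row in memory are assumptions, not part of the dataset), a row can be modeled as four strings:

    #include <string>

    // One corpus row; field names mirror the columns listed above.
    // Contents can reach ~9.69M characters, so large rows are better streamed than copied.
    struct HipCudaPair {
        std::string hip_filename;   // e.g. "fcd394853732933cc2ddcf59fa29d561f0263cb1.hip"
        std::string hip_content;    // full HIP source text (hipify output)
        std::string cuda_filename;  // e.g. "fcd394853732933cc2ddcf59fa29d561f0263cb1.cu"
        std::string cuda_content;   // full original CUDA source text
    };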
fcd394853732933cc2ddcf59fa29d561f0263cb1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iso646.h>
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "SyncedMemory.h"
#include "lab1.h"
using namespace std;

#define CHECK {\
    auto e = hipDeviceSynchronize();\
    if (e != hipSuccess) {\
        printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
        abort();\
    }\
}

int main(int argc, char **argv) {
    Lab1VideoGenerator g;
    Lab1VideoInfo i;
    g.get_info(i);
    if (i.w == 0 | i.h == 0 | i.n_frame == 0 | i.fps_n == 0 | i.fps_d == 0) {
        puts("Cannot be zero");
        abort();
    } else if (i.w%2 != 0 | i.h%2 != 0) {
        puts("Only even frame size is supported");
        abort();
    }
    unsigned FRAME_SIZE = i.w*i.h*3/2;
    MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
    auto frames = frameb.CreateSync(FRAME_SIZE);
    FILE *fp = fopen("result.y4m", "wb");
    printf("start");
    fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
    for (unsigned j = 0; j < i.n_frame; ++j) {
        fputs("FRAME\n", fp);
        g.Generate(frames.get_gpu_wo());
        fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
    }
    fclose(fp);
    printf("end");
    return 0;
}
fcd394853732933cc2ddcf59fa29d561f0263cb1.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iso646.h> #include <cstdio> #include <cstdint> #include <cstdlib> #include "SyncedMemory.h" #include "lab1.h" using namespace std; #define CHECK {\ auto e = cudaDeviceSynchronize();\ if (e != cudaSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\ abort();\ }\ } int main(int argc, char **argv) { Lab1VideoGenerator g; Lab1VideoInfo i; g.get_info(i); if (i.w == 0 | i.h == 0 | i.n_frame == 0 | i.fps_n == 0 | i.fps_d == 0) { puts("Cannot be zero"); abort(); } else if (i.w%2 != 0 | i.h%2 != 0) { puts("Only even frame size is supported"); abort(); } unsigned FRAME_SIZE = i.w*i.h*3/2; MemoryBuffer<uint8_t> frameb(FRAME_SIZE); auto frames = frameb.CreateSync(FRAME_SIZE); FILE *fp = fopen("result.y4m", "wb"); printf("start"); fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d); for (unsigned j = 0; j < i.n_frame; ++j) { fputs("FRAME\n", fp); g.Generate(frames.get_gpu_wo()); fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp); } fclose(fp); printf("end"); return 0; }
d654bdeca448d1a413a7cc87ccc3b4b7f18a965d.hip
// !!! This is a file automatically generated by hipify!!! /* * Discrete Cosine/Sine Transform(DCT/DST and IDCT/IDST one to four-all in one) * DCT/DST and IDCT/IDST I ---> IV * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * GpuArray/cpuArray output, B=Discrete_Transform(A, , type of Transform (sine or cosine), type of Transform(direct/inverse), type of DCT/DST or IDCT/IDST, dimensions). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include "CuFilesD/Discrete_Transform_kernel.cuh" #include "CuFilesD/DCT_I_Column.cu" #include "CuFilesD/DCT_I_Row.cu" #include "CuFilesD/DCT_I_Column_Inverse.cu" #include "CuFilesD/DCT_I_Row_Inverse.cu" #include "CuFilesD/DCT_II_Row.cu" #include "CuFilesD/DCT_II_Row_Inverse.cu" #include "CuFilesD/DCT_II_Column.cu" #include "CuFilesD/DCT_II_Column_Inverse.cu" #include "CuFilesD/DCT_III_Row.cu" #include "CuFilesD/DCT_III_Row_Inverse.cu" #include "CuFilesD/DCT_III_Column.cu" #include "CuFilesD/DCT_III_Column_Inverse.cu" #include "CuFilesD/DCT_IV_Row.cu" #include "CuFilesD/DCT_IV_Row_Inverse.cu" #include "CuFilesD/DCT_IV_Column.cu" #include "CuFilesD/DCT_IV_Column_Inverse.cu" #include "CuFilesD/DST_I_Column.cu" #include "CuFilesD/DST_I_Row.cu" #include "CuFilesD/DST_I_Column_Inverse.cu" #include "CuFilesD/DST_I_Row_Inverse.cu" #include "CuFilesD/DST_II_Row.cu" #include "CuFilesD/DST_II_Row_Inverse.cu" #include "CuFilesD/DST_II_Column.cu" #include "CuFilesD/DST_II_Column_Inverse.cu" #include "CuFilesD/DST_III_Row.cu" #include "CuFilesD/DST_III_Row_Inverse.cu" #include "CuFilesD/DST_III_Column.cu" #include "CuFilesD/DST_III_Column_Inverse.cu" #include "CuFilesD/DST_IV_Row.cu" #include "CuFilesD/DST_IV_Row_Inverse.cu" #include "CuFilesD/DST_IV_Column.cu" #include "CuFilesD/DST_IV_Column_Inverse.cu" //#include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) //#define TILE_DIM 16 unsigned int TILE_DIM=16; // DCT extern "C" void CalculateTransformDCTColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnThree(float * A, 
float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); // DST extern "C" void CalculateTransformDSTColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" static void mexTransD(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! 
\n"); return; } char row[] = "row"; char column[] = "column"; char one[] = "one"; char two[] = "two"; char three[] = "three"; char four[] = "four"; char direct[] = "direct"; char inverse[] = "inverse"; char cosine[] = "cosine"; char sine[] = "sine"; char const * const InputErrMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float), and the number of input arguments must be five."; if ((nrhs!=5)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(prhs[0]); char *input_buf1; input_buf1 = mxArrayToString(prhs[1]); char *input_buf2; input_buf2 = mxArrayToString(prhs[2]); char *input_buf3; input_buf3 = mxArrayToString(prhs[3]); char *input_buf4; input_buf4 = mxArrayToString(prhs[4]); if ((mxIsChar(prhs[0]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } if (!(mxIsChar(prhs[1]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[2]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[3]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[4]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be of type string.\n."); } if ((strcmp (cosine,input_buf1) != 0) &&(strcmp (sine,input_buf1) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be 'cosine' or 'sine' not %s\n",input_buf1); } if ((strcmp (direct,input_buf2) != 0)&& (strcmp (inverse,input_buf2) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be 'direct' or 'inverse' not %s\n",input_buf2); } if ((strcmp (one,input_buf3) != 0)&& (strcmp (two,input_buf3) != 0) && (strcmp (three,input_buf3) != 0) && (strcmp (four,input_buf3) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be 'one' or 'two' or 'three' or 'four' not %s\n",input_buf3); } if ((strcmp (column,input_buf4) != 0)&&(strcmp (row,input_buf4) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be 'column' or 'row' not %s\n",input_buf4); } //COSINE TRANSFORM if (strcmp (cosine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns 
in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer =(float*) mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; 
dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } mxInitGPU(); hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the 
matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, 
numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } mxInitGPU(); numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer =(float*) mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } } //SINE TRANSFORM if (strcmp (sine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in 
the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; 
dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } mxInitGPU(); hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the 
matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); hipError_t error; int devID = 0; error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, 
numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } mxInitGPU(); numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer =(float*) mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } } }
d654bdeca448d1a413a7cc87ccc3b4b7f18a965d.cu
/* * Discrete Cosine/Sine Transform(DCT/DST and IDCT/IDST one to four-all in one) * DCT/DST and IDCT/IDST I ---> IV * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * GpuArray/cpuArray output, B=Discrete_Transform(A, , type of Transform (sine or cosine), type of Transform(direct/inverse), type of DCT/DST or IDCT/IDST, dimensions). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include "CuFilesD/Discrete_Transform_kernel.cuh" #include "CuFilesD/DCT_I_Column.cu" #include "CuFilesD/DCT_I_Row.cu" #include "CuFilesD/DCT_I_Column_Inverse.cu" #include "CuFilesD/DCT_I_Row_Inverse.cu" #include "CuFilesD/DCT_II_Row.cu" #include "CuFilesD/DCT_II_Row_Inverse.cu" #include "CuFilesD/DCT_II_Column.cu" #include "CuFilesD/DCT_II_Column_Inverse.cu" #include "CuFilesD/DCT_III_Row.cu" #include "CuFilesD/DCT_III_Row_Inverse.cu" #include "CuFilesD/DCT_III_Column.cu" #include "CuFilesD/DCT_III_Column_Inverse.cu" #include "CuFilesD/DCT_IV_Row.cu" #include "CuFilesD/DCT_IV_Row_Inverse.cu" #include "CuFilesD/DCT_IV_Column.cu" #include "CuFilesD/DCT_IV_Column_Inverse.cu" #include "CuFilesD/DST_I_Column.cu" #include "CuFilesD/DST_I_Row.cu" #include "CuFilesD/DST_I_Column_Inverse.cu" #include "CuFilesD/DST_I_Row_Inverse.cu" #include "CuFilesD/DST_II_Row.cu" #include "CuFilesD/DST_II_Row_Inverse.cu" #include "CuFilesD/DST_II_Column.cu" #include "CuFilesD/DST_II_Column_Inverse.cu" #include "CuFilesD/DST_III_Row.cu" #include "CuFilesD/DST_III_Row_Inverse.cu" #include "CuFilesD/DST_III_Column.cu" #include "CuFilesD/DST_III_Column_Inverse.cu" #include "CuFilesD/DST_IV_Row.cu" #include "CuFilesD/DST_IV_Row_Inverse.cu" #include "CuFilesD/DST_IV_Column.cu" #include "CuFilesD/DST_IV_Column_Inverse.cu" //#include <math.h> #include <cuda.h> #include <cuda_runtime.h> #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) //#define TILE_DIM 16 unsigned int TILE_DIM=16; // DCT extern "C" void CalculateTransformDCTColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); 
extern "C" void CalculateTransformDCTRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); // DST extern "C" void CalculateTransformDSTColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowOne(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnTwo(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowThree(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowFour(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" static void mexTransD(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! 
\n"); return; } char row[] = "row"; char column[] = "column"; char one[] = "one"; char two[] = "two"; char three[] = "three"; char four[] = "four"; char direct[] = "direct"; char inverse[] = "inverse"; char cosine[] = "cosine"; char sine[] = "sine"; char const * const InputErrMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float), and the number of input arguments must be five."; if ((nrhs!=5)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(prhs[0]); char *input_buf1; input_buf1 = mxArrayToString(prhs[1]); char *input_buf2; input_buf2 = mxArrayToString(prhs[2]); char *input_buf3; input_buf3 = mxArrayToString(prhs[3]); char *input_buf4; input_buf4 = mxArrayToString(prhs[4]); if ((mxIsChar(prhs[0]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } if (!(mxIsChar(prhs[1]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[2]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[3]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[4]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be of type string.\n."); } if ((strcmp (cosine,input_buf1) != 0) &&(strcmp (sine,input_buf1) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be 'cosine' or 'sine' not %s\n",input_buf1); } if ((strcmp (direct,input_buf2) != 0)&& (strcmp (inverse,input_buf2) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be 'direct' or 'inverse' not %s\n",input_buf2); } if ((strcmp (one,input_buf3) != 0)&& (strcmp (two,input_buf3) != 0) && (strcmp (three,input_buf3) != 0) && (strcmp (four,input_buf3) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be 'one' or 'two' or 'three' or 'four' not %s\n",input_buf3); } if ((strcmp (column,input_buf4) != 0)&&(strcmp (row,input_buf4) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be 'column' or 'row' not %s\n",input_buf4); } //COSINE TRANSFORM if (strcmp (cosine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns 
in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer =(float*) mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; 
dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } mxInitGPU(); hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the 
matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, 
d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } mxInitGPU(); numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer =(float*) mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } } //SINE TRANSFORM if (strcmp (sine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in 
the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; 
dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } mxInitGPU(); hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Inverse_Kernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Inverse_Kernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the 
matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } mxInitGPU(); float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer = (float*)mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } if (strcmp (row,input_buf4) == 0) { if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; float const *d_A; float *d_B; int numARows, numAColumns, numCRows, numCColumns; mxInitGPU(); cudaError_t error; int devID = 0; error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 16 : 32; A = mxGPUCreateFromMxArray(prhs[0]); if(mxGPUGetComplexity(A) != mxREAL){ mxGPUDestroyGPUArray(A); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ size_t pivot_dimensA[2] = {numARows,numAColumns}; mwSize NrOfDim=mxGPUGetNumberOfDimensions(A); if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."; if (mxGPUGetClassID(A) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (float const *)(mxGPUGetDataReadOnly(A)); mxGPUDestroyGPUArray(A); B = mxGPUCreateGPUArray(NrOfDim, (mwSize*) pivot_dimensA, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_B = (float *)(mxGPUGetData(B)); dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row__InverseKernel_GPUA <16> << <dimGrid, dimBlock >> >(d_A, d_B, 
numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row__InverseKernel_GPUA <32> << <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(B); } } else if (!(mxIsGPUArray(prhs[0]))){ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, input(FIRST ARGUMENT) must be single precision (float)."); } if(mxIsComplex(prhs[0])){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input matrix must be real %s\n"); } int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } mxInitGPU(); numCRows = numARows; numCColumns = numAColumns; float * hostA ; // The A matrix hostA = (float *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxSINGLE_CLASS, mxREAL); float *pointer =(float*) mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } } } } } }
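For reference, a minimal MATLAB usage sketch of the entry point above. This assumes the MEX file is compiled under the name Discrete_Transform, as the header comment indicates; the first argument must be a real single-precision matrix (CPU array or gpuArray), and the four string arguments follow the checks performed in mexTransD.

% Hypothetical call, assuming the compiled MEX target is named Discrete_Transform.
A = single(rand(8, 8));                                           % real single-precision input
B = Discrete_Transform(A, 'cosine', 'direct',  'two', 'column');  % DCT-II applied column-wise
C = Discrete_Transform(B, 'cosine', 'inverse', 'two', 'column');  % inverse DCT-II, should recover A up to float error
% A gpuArray input is also accepted, e.g. Discrete_Transform(gpuArray(A), 'sine', 'direct', 'one', 'row')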
464e3d1584f0013dfda51116d9aaaf21bd91bc13.hip
// !!! This is a file automatically generated by hipify!!! #include "lab08.cuh" #include <iostream> #include <sstream> #include <iomanip> #include <cmath> #include <fstream> #include <algorithm> #include <iterator> #include <cstdio> #include "MPI_dummy_helper.hpp" #include "dummy_helper.cuh" #include <mpi.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #define GRID_SIZE 1 #define BLOCK_SIZE 4 #define GRID_SIZE_dim3 dim3(GRID_SIZE, GRID_SIZE, GRID_SIZE) #define BLOCK_SIZE_dim3 dim3(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE) #define locate(i, j, k) block_h[(i) + (j) * block_shape[0] + (k) * block_shape[0] * block_shape[1]] #define locate_p(v, i, j, k) v[(i) + (j) * block_shape[0] + (k) * block_shape[0] * block_shape[1]] __global__ void init_array( double *v, long long count, double init_value ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long id = idx + idy * blockDim.x * gridDim.x + idz * blockDim.x * gridDim.x * blockDim.y * gridDim.y; const long long offset = gridDim.x * blockDim.x * gridDim.y * blockDim.y * gridDim.z * blockDim.z; for (long long i = id; i < count; i += offset) v[i] = init_value; } void Lab08::set_device() { int device_count; checkCudaErrors(hipGetDeviceCount(&device_count)); checkCudaErrors(hipSetDevice(rank % device_count)); } Lab08::Lab08(int argc, char **argv) { init(argc, argv); checkMPIErrors(MPI_Comm_rank(MPI_COMM_WORLD, &rank)); set_device(); // read input data if (rank == 0) rank_0_init(); else rank_non_0_init(); block_z = rank / process_grid_shape[0] / process_grid_shape[1]; block_y = rank % (process_grid_shape[0] * process_grid_shape[1]) / process_grid_shape[0]; block_x = rank % (process_grid_shape[0] * process_grid_shape[1]) % process_grid_shape[0]; sends_first = (block_x + block_y + block_z) % 2; CudaKernelChecker kernel_checker; auto block_init_routine = [&kernel_checker, this](CudaMemory<double> &v, const char* kernel_name) { v.alloc(block_shape[0] * block_shape[1] * block_shape[2]); hipLaunchKernelGGL(( init_array), dim3(GRID_SIZE_dim3), dim3(BLOCK_SIZE_dim3), 0, 0, v.get(), block_shape[0] * block_shape[1] * block_shape[2], u_0 ); kernel_checker.check(kernel_name); }; block_init_routine(block_d, "init block_d"); block_init_routine(prev_block_d, "init prev_block_d"); auto boundary_layer_init_routine = [this]( std::vector<double> &v_h, CudaMemory<double> &v_d, const bool layer_not_needed, const long long count ) { v_h.resize( layer_not_needed ? 0 : count); v_d.alloc( layer_not_needed ? 
0 : count); }; boundary_layer_init_routine( left_h, left_d, block_x == 0, block_shape[1] * block_shape[2] ); boundary_layer_init_routine( right_h, right_d, block_x == process_grid_shape[0] - 1, block_shape[1] * block_shape[2] ); boundary_layer_init_routine( front_h, front_d, block_y == 0, block_shape[0] * block_shape[2] ); boundary_layer_init_routine( back_h, back_d, block_y == process_grid_shape[1] - 1, block_shape[0] * block_shape[2] ); boundary_layer_init_routine( down_h, down_d, block_z == 0, block_shape[0] * block_shape[1] ); boundary_layer_init_routine( up_h, up_d, block_z == process_grid_shape[2] - 1, block_shape[0] * block_shape[1] ); timer.start(); } void Lab08::init(int argc, char **argv) { int initialized; checkMPIErrors(MPI_Initialized( &initialized )); if (!initialized) { checkMPIErrors(MPI_Init(&argc, &argv)); } } void Lab08::finalize() { int finalized; checkMPIErrors(MPI_Finalized( &finalized )); if (!finalized) { checkMPIErrors(MPI_Barrier(MPI_COMM_WORLD)); checkMPIErrors(MPI_Finalize()); } } void Lab08::rank_0_init() { // input read_in_container(process_grid_shape); read_in_container(block_shape); std::cin >> output_name; std::cin >> eps; read_in_container(l); std::cin >> boundaries.down >> boundaries.up >> boundaries.left >> boundaries.right >> boundaries.front >> boundaries.back; std::cin >> u_0; // input done // send input data to other ranks int n_ranks = process_grid_shape[0] * process_grid_shape[1] * process_grid_shape[2]; for (int rank = 1; rank < n_ranks; ++rank) { checkMPIErrors(MPI_Send( process_grid_shape.data(), process_grid_shape.size(), MPI_LONG_LONG, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( block_shape.data(), block_shape.size(), MPI_LONG_LONG, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( output_name.data(), output_name.size(), MPI_CHAR, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( &eps, 1, MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( l.data(), l.size(), MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( reinterpret_cast<double*>(&boundaries), sizeof(boundaries) / sizeof(double), MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( &u_0, 1, MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); } } void Lab08::rank_non_0_init() { int root_rank = 0; checkMPIErrors(MPI_Recv( process_grid_shape.data(), process_grid_shape.size(), MPI_LONG_LONG, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); checkMPIErrors(MPI_Recv( block_shape.data(), block_shape.size(), MPI_LONG_LONG, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); MPI_Status status; checkMPIErrors(MPI_Probe( root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, &status )); int output_name_count; checkMPIErrors(MPI_Get_count( &status, MPI_CHAR, &output_name_count )); std::vector<char> output_name_buffer(output_name_count); checkMPIErrors(MPI_Recv( output_name_buffer.data(), output_name_buffer.size(), MPI_CHAR, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); std::copy( output_name_buffer.begin(), output_name_buffer.end(), std::back_inserter(output_name) ); checkMPIErrors(MPI_Recv( &eps, 1, MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); checkMPIErrors(MPI_Recv( l.data(), l.size(), MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); checkMPIErrors(MPI_Recv( reinterpret_cast<double*>(&boundaries), sizeof(boundaries) / sizeof(double), MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE 
)); checkMPIErrors(MPI_Recv( &u_0, 1, MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); } int Lab08::block_position_to_rank( long long block_x, long long block_y, long long block_z ) { if (block_x < 0 || block_y < 0 || block_z < 0) { return -1; } if ( block_x >= process_grid_shape[0] || block_y >= process_grid_shape[1] || block_z >= process_grid_shape[2] ) { return -1; } return block_x + block_y * process_grid_shape[0] + block_z * process_grid_shape[0] * process_grid_shape[1]; } void Lab08::send_boundary_layer( std::vector<double> &v_h, CudaMemory<double> &v_d, int destination_rank, Lab08::BoundaryLayerTag tag, std::vector<MPI_Request> &send_requests ) { if (destination_rank > -1) { v_d.memcpy( v_h.data(), hipMemcpyDeviceToHost ); MPI_Request request; checkMPIErrors(MPI_Isend( v_h.data(), v_h.size(), MPI_DOUBLE, destination_rank, static_cast<int>(tag), MPI_COMM_WORLD, &request )); send_requests.push_back(request); } } void Lab08::send_boundary_layers(std::vector<MPI_Request> &send_requests) { send_boundary_layer( left_h, left_d, block_position_to_rank(block_x - 1, block_y, block_z), BoundaryLayerTag::RIGHT, send_requests ); send_boundary_layer( right_h, right_d, block_position_to_rank(block_x + 1, block_y, block_z), BoundaryLayerTag::LEFT, send_requests ); send_boundary_layer( front_h, front_d, block_position_to_rank(block_x, block_y - 1, block_z), BoundaryLayerTag::BACK, send_requests ); send_boundary_layer( back_h, back_d, block_position_to_rank(block_x, block_y + 1, block_z), BoundaryLayerTag::FRONT, send_requests ); send_boundary_layer( down_h, down_d, block_position_to_rank(block_x, block_y, block_z - 1), BoundaryLayerTag::UP, send_requests ); send_boundary_layer( up_h, up_d, block_position_to_rank(block_x, block_y, block_z + 1), BoundaryLayerTag::DOWN, send_requests ); } void Lab08::receive_boundary_layer( std::vector<double> &v, int source_rank, BoundaryLayerTag tag, std::vector<MPI_Request> &receive_requests ) { if (source_rank > -1) { MPI_Request request; checkMPIErrors(MPI_Irecv( v.data(), v.size(), MPI_DOUBLE, source_rank, static_cast<int>(tag), MPI_COMM_WORLD, &request )); receive_requests.push_back(request); } } void Lab08::receive_boundary_layers( std::vector<double> &left_h, std::vector<double> &right_h, std::vector<double> &front_h, std::vector<double> &back_h, std::vector<double> &down_h, std::vector<double> &up_h, std::vector<MPI_Request> &receive_requests ) { receive_boundary_layer( left_h, block_position_to_rank(block_x - 1, block_y, block_z), BoundaryLayerTag::LEFT, receive_requests ); receive_boundary_layer( right_h, block_position_to_rank(block_x + 1, block_y, block_z), BoundaryLayerTag::RIGHT, receive_requests ); receive_boundary_layer( front_h, block_position_to_rank(block_x, block_y - 1, block_z), BoundaryLayerTag::FRONT, receive_requests ); receive_boundary_layer( back_h, block_position_to_rank(block_x, block_y + 1, block_z), BoundaryLayerTag::BACK, receive_requests ); receive_boundary_layer( down_h, block_position_to_rank(block_x, block_y, block_z - 1), BoundaryLayerTag::DOWN, receive_requests ); receive_boundary_layer( up_h, block_position_to_rank(block_x, block_y, block_z + 1), BoundaryLayerTag::UP, receive_requests ); } __device__ long long block_shape[3]; __global__ void copy_block_to_prev_block( double *block, double *prev_block ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long 
offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; for (long long k = idz; k < block_shape[2]; k += offset_z) for (long long j = idy; j < block_shape[1]; j += offset_y) for (long long i = idx; i < block_shape[0]; i += offset_x) locate_p(prev_block, i, j, k) = locate_p(block, i, j, k); } __global__ void prev_block_to_abs_difference_array( double *block, double *prev_block ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; for (long long k = idz; k < block_shape[2]; k += offset_z) for (long long j = idy; j < block_shape[1]; j += offset_y) for (long long i = idx; i < block_shape[0]; i += offset_x) locate_p(prev_block, i, j, k) = fabs(locate_p(block, i, j, k) - locate_p(prev_block, i, j, k)); } __global__ void block_iter_process( double *block, double *prev_block, double *left, double *right, double *front, double *back, double *down, double *up, Lab08::Boundaries boundaries, double h_x_pow_minus_2, double h_y_pow_minus_2, double h_z_pow_minus_2, double denominator ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; for (long long k = idz; k < block_shape[2]; k += offset_z) for (long long j = idy; j < block_shape[1]; j += offset_y) for (long long i = idx; i < block_shape[0]; i += offset_x) { double u_left = i == 0 ? (left == nullptr ? boundaries.left : left[ j + block_shape[1] * k]) : locate_p(prev_block, i - 1, j, k), u_right = i == block_shape[0] - 1 ? (right == nullptr ? boundaries.right : right[j + block_shape[1] * k]) : locate_p(prev_block, i + 1, j, k), u_front = j == 0 ? (front == nullptr ? boundaries.front : front[i + block_shape[0] * k]) : locate_p(prev_block, i, j - 1, k), u_back = j == block_shape[1] - 1 ? (back == nullptr ? boundaries.back : back[ i + block_shape[0] * k]) : locate_p(prev_block, i, j + 1, k), u_down = k == 0 ? (down == nullptr ? boundaries.down : down[ i + block_shape[0] * j]) : locate_p(prev_block, i, j, k - 1), u_up = k == block_shape[2] - 1 ? (up == nullptr ? 
boundaries.up : up[ i + block_shape[0] * j]) : locate_p(prev_block, i, j, k + 1); locate_p(block, i, j, k) = (u_left + u_right) * h_x_pow_minus_2; locate_p(block, i, j, k) += (u_front + u_back ) * h_y_pow_minus_2; locate_p(block, i, j, k) += (u_down + u_up ) * h_z_pow_minus_2; locate_p(block, i, j, k) /= denominator; } } __global__ void init_boundary_layers( double *block, double *left, double *right, double *front, double *back, double *down, double *up ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; #define fill_boundary_layer(v, outer, inner, outer_start, inner_start, outer_offset, inner_offset, loc) \ { \ if (v) \ { \ for (long long i = outer_start; i < block_shape[outer]; i += outer_offset) \ for (long long j = inner_start; j < block_shape[inner]; j += inner_offset) \ v[i * block_shape[inner] + j] = loc; \ } \ } \ if (idx == 0) fill_boundary_layer(left , 2, 1, idz, idy, offset_z, offset_y, locate_p(block, 0, j, i)); if (idx == 1) fill_boundary_layer(right, 2, 1, idz, idy, offset_z, offset_y, locate_p(block, block_shape[0] - 1, j, i)); if (idy == 0) fill_boundary_layer(front, 2, 0, idz, idx, offset_z, offset_x, locate_p(block, j, 0, i)); if (idy == 1) fill_boundary_layer(back , 2, 0, idz, idx, offset_z, offset_x, locate_p(block, j, block_shape[1] - 1, i)); if (idz == 0) fill_boundary_layer(down , 1, 0, idy, idx, offset_y, offset_x, locate_p(block, j, i, 0)); if (idz == 1) fill_boundary_layer(up , 1, 0, idy, idx, offset_y, offset_x, locate_p(block, j, i, block_shape[2] - 1)); #undef fill_boundary_layer } void Lab08::copy_boundary_layers_to_device( std::vector<double> &left_h, std::vector<double> &right_h, std::vector<double> &front_h, std::vector<double> &back_h, std::vector<double> &down_h, std::vector<double> &up_h ) { auto copy_boundary_layer_to_device = []( std::vector<double> &v_h, CudaMemory<double> &v_d ) { v_d.memcpy(v_h.data(), hipMemcpyHostToDevice); }; copy_boundary_layer_to_device(left_h, left_d); copy_boundary_layer_to_device(right_h, right_d); copy_boundary_layer_to_device(front_h, front_d); copy_boundary_layer_to_device(back_h, back_d); copy_boundary_layer_to_device(down_h, down_d); copy_boundary_layer_to_device(up_h, up_d); } void Lab08::solve() { std::vector<double> left_h (block_x == 0 ? 0 : block_shape[1] * block_shape[2]), right_h(block_x == process_grid_shape[0] - 1 ? 0 : block_shape[1] * block_shape[2]), front_h(block_y == 0 ? 0 : block_shape[0] * block_shape[2]), back_h (block_y == process_grid_shape[1] - 1 ? 0 : block_shape[0] * block_shape[2]), down_h (block_z == 0 ? 0 : block_shape[0] * block_shape[1]), up_h (block_z == process_grid_shape[2] - 1 ? 
0 : block_shape[0] * block_shape[1]); double n_x = block_shape[0] * process_grid_shape[0], n_y = block_shape[1] * process_grid_shape[1], n_z = block_shape[2] * process_grid_shape[2]; double h_x_pow_minus_2 = n_x * n_x / l[0] / l[0], h_y_pow_minus_2 = n_y * n_y / l[1] / l[1], h_z_pow_minus_2 = n_z * n_z / l[2] / l[2], denominator = 2 * (h_x_pow_minus_2 + h_y_pow_minus_2 + h_z_pow_minus_2); std::vector<MPI_Request> send_requests, receive_requests; checkCudaErrors(hipMemcpyToSymbol( ::block_shape, block_shape.data(), block_shape.size() * sizeof(decltype(block_shape[0])), 0, hipMemcpyHostToDevice )); thrust::device_ptr<double> abs_difference_array = thrust::device_pointer_cast(prev_block_d.get()); while (true) { CudaKernelChecker checker; hipLaunchKernelGGL(( init_boundary_layers), dim3(GRID_SIZE_dim3), dim3(BLOCK_SIZE_dim3), 0, 0, block_d.get(), left_d.get(), right_d.get(), front_d.get(), back_d.get(), down_d.get(), up_d.get() ); checker.check("init_boundary_layers"); if (sends_first) { send_boundary_layers(send_requests); checkMPIErrors(MPI_Waitall( send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE )); send_requests.clear(); receive_boundary_layers( left_h, right_h, front_h, back_h, down_h, up_h, receive_requests ); checkMPIErrors(MPI_Waitall( receive_requests.size(), receive_requests.data(), MPI_STATUSES_IGNORE )); receive_requests.clear(); } else { receive_boundary_layers( left_h, right_h, front_h, back_h, down_h, up_h, receive_requests ); checkMPIErrors(MPI_Waitall( receive_requests.size(), receive_requests.data(), MPI_STATUSES_IGNORE )); receive_requests.clear(); send_boundary_layers(send_requests); checkMPIErrors(MPI_Waitall( send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE )); send_requests.clear(); } copy_boundary_layers_to_device( left_h, right_h, front_h, back_h, down_h, up_h ); hipLaunchKernelGGL(( copy_block_to_prev_block), dim3(GRID_SIZE_dim3), dim3(BLOCK_SIZE_dim3), 0, 0, block_d.get(), prev_block_d.get() ); checker.check("copy_block_to_prev_block"); hipLaunchKernelGGL(( block_iter_process), dim3(GRID_SIZE_dim3), dim3(BLOCK_SIZE_dim3), 0, 0, block_d.get(), prev_block_d.get(), left_d.get(), right_d.get(), front_d.get(), back_d.get(), down_d.get(), up_d.get(), boundaries, h_x_pow_minus_2, h_y_pow_minus_2, h_z_pow_minus_2, denominator ); checker.check("iter process kernel"); hipLaunchKernelGGL(( prev_block_to_abs_difference_array), dim3(GRID_SIZE_dim3), dim3(BLOCK_SIZE_dim3), 0, 0, block_d.get(), prev_block_d.get() ); checker.check("prev_block_to_abs_difference_array"); double max_abs_difference = *thrust::max_element( abs_difference_array, abs_difference_array + prev_block_d.count ); double total_max_abs_difference; checkMPIErrors(MPI_Allreduce( &max_abs_difference, &total_max_abs_difference, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD )); if (total_max_abs_difference < eps) break; } } void Lab08::write_answer() { timer.stop(); if (rank == 0) timer.print_time(); MPI_File file; int delete_error = MPI_File_delete(output_name.c_str(), MPI_INFO_NULL); if (delete_error != 0 && delete_error != MPI_ERR_NO_SUCH_FILE) checkMPIErrors(delete_error); checkMPIErrors(MPI_File_open( MPI_COMM_WORLD, output_name.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file )); // create type MPI_Datatype Number; const int number_chars_count = 16; // 0 . 
000000 e+000 ' ' checkMPIErrors(MPI_Type_contiguous( number_chars_count, MPI_CHAR, &Number )); MPI_Datatype BlockRow; checkMPIErrors(MPI_Type_contiguous( block_shape[0], Number, &BlockRow )); MPI_Datatype BlockPlane; std::vector<int> BlockPlane_blocklengths; std::vector<int> BlockPlane_displacements; for (size_t i = 0; i < block_shape[1]; ++i) { BlockPlane_blocklengths.push_back(1); BlockPlane_displacements.push_back(i * process_grid_shape[0]); } checkMPIErrors(MPI_Type_create_hvector(block_shape[1], 1, process_grid_shape[0] * block_shape[0] * number_chars_count, BlockRow, &BlockPlane)); /* checkMPIErrors(MPI_Type_indexed( */ /* block_shape[1], */ /* BlockPlane_blocklengths.data(), */ /* BlockPlane_displacements.data(), */ /* BlockRow, */ /* &BlockPlane */ /* )); */ MPI_Datatype Block; std::vector<int> Block_blocklengths; std::vector<int> Block_displacements; for (size_t i = 0; i < block_shape[2]; ++i) { Block_blocklengths.push_back(1); Block_displacements.push_back(i * process_grid_shape[1]); } checkMPIErrors(MPI_Type_create_hvector(block_shape[2], 1, process_grid_shape[1] * block_shape[0] * number_chars_count * process_grid_shape[0] * block_shape[1], BlockPlane, &Block)); /* checkMPIErrors(MPI_Type_indexed( */ /* block_shape[2], */ /* Block_blocklengths.data(), */ /* Block_displacements.data(), */ /* BlockPlane, */ /* &Block */ /* )); */ checkMPIErrors(MPI_Type_commit(&Block)); // set view with created type MPI_Offset offset = 0; offset += block_shape[0] * number_chars_count * block_x; offset += block_shape[0] * block_shape[1] * process_grid_shape[0] * number_chars_count * block_y; offset += block_shape[0] * block_shape[1] * block_shape[2] * process_grid_shape[0] * process_grid_shape[1] * number_chars_count * block_z; checkMPIErrors(MPI_File_set_view( file, offset, MPI_CHAR, Block, "native", MPI_INFO_NULL )); // create buffer with data to write std::string buffer; size_t buffer_pos = 0; block_h.resize(block_d.count); block_d.memcpy(block_h.data(), hipMemcpyDeviceToHost); /* for (size_t i = 0; i < block_h.size(); ++i) */ /* { */ /* std::cout << i << std::endl; */ /* block_h[i] = i + block_h.size() * rank; */ /* } */ for (long long k = 0; k < block_shape[2]; ++k) for (long long j = 0; j < block_shape[1]; ++j) for (long long i = 0; i < block_shape[0]; ++i) { buffer.resize(buffer_pos + number_chars_count); sprintf(&buffer[buffer_pos], "%-16e", locate(i, j, k)); buffer_pos += number_chars_count; if (block_x == process_grid_shape[0] - 1 && i == block_shape[0] - 1) { buffer[buffer_pos - 1] = '\n'; if (j == block_shape[1] - 1 && block_y == process_grid_shape[1] - 1 && (block_z != process_grid_shape[2] - 1 || k != block_shape[2] - 1)) buffer[buffer_pos - 2] = '\n'; } } /* buffer[0] = 'R'; */ /* buffer[1] = 'a'; */ /* buffer[2] = 'n'; */ /* buffer[3] = 'k'; */ /* buffer[4] = ' '; */ /* buffer[5] = '0' + rank; */ /* if (rank == 0) { */ /* std::cout << buffer << std::endl; */ /* for (int i = 1; i < 8; i++) { */ /* MPI_Recv(&buffer[0], buffer.size(), MPI_CHAR, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); */ /* std::cout << std::endl << buffer << std::endl; */ /* } */ /* } else { */ /* MPI_Send(&buffer[0], buffer.size(), MPI_CHAR, 0, 0, MPI_COMM_WORLD); */ /* } */ // write data from buffer checkMPIErrors(MPI_File_write_all( file, buffer.data(), buffer.size(), MPI_CHAR, MPI_STATUS_IGNORE )); // close file checkMPIErrors(MPI_File_close(&file)); }
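The stopping test in Lab08::solve() above combines a device-side reduction with a cross-rank reduction: thrust::max_element finds the largest per-cell change on the local GPU, then MPI_Allreduce with MPI_MAX compares all ranks. A minimal standalone sketch of just that combination; the vector size, the index 42, and the residual values are made up for illustration and are not taken from the original file.
#include <mpi.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <cstdio>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Per-rank |u_new - u_old| values on the device (illustrative data only).
    thrust::device_vector<double> diff(1000, 0.0);
    diff[42] = 1e-3 * (rank + 1);   // pretend this is the rank's largest local change

    // Local maximum on the GPU, then a global MAX reduction across ranks,
    // mirroring the convergence check at the end of each iteration.
    double local_max = *thrust::max_element(diff.begin(), diff.end());
    double global_max = 0.0;
    MPI_Allreduce(&local_max, &global_max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);

    if (rank == 0)
        printf("global max residual: %e\n", global_max);

    MPI_Finalize();
    return 0;
}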
464e3d1584f0013dfda51116d9aaaf21bd91bc13.cu
#include "lab08.cuh" #include <iostream> #include <sstream> #include <iomanip> #include <cmath> #include <fstream> #include <algorithm> #include <iterator> #include <cstdio> #include "MPI_dummy_helper.hpp" #include "dummy_helper.cuh" #include <mpi.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #define GRID_SIZE 1 #define BLOCK_SIZE 4 #define GRID_SIZE_dim3 dim3(GRID_SIZE, GRID_SIZE, GRID_SIZE) #define BLOCK_SIZE_dim3 dim3(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE) #define locate(i, j, k) block_h[(i) + (j) * block_shape[0] + (k) * block_shape[0] * block_shape[1]] #define locate_p(v, i, j, k) v[(i) + (j) * block_shape[0] + (k) * block_shape[0] * block_shape[1]] __global__ void init_array( double *v, long long count, double init_value ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long id = idx + idy * blockDim.x * gridDim.x + idz * blockDim.x * gridDim.x * blockDim.y * gridDim.y; const long long offset = gridDim.x * blockDim.x * gridDim.y * blockDim.y * gridDim.z * blockDim.z; for (long long i = id; i < count; i += offset) v[i] = init_value; } void Lab08::set_device() { int device_count; checkCudaErrors(cudaGetDeviceCount(&device_count)); checkCudaErrors(cudaSetDevice(rank % device_count)); } Lab08::Lab08(int argc, char **argv) { init(argc, argv); checkMPIErrors(MPI_Comm_rank(MPI_COMM_WORLD, &rank)); set_device(); // read input data if (rank == 0) rank_0_init(); else rank_non_0_init(); block_z = rank / process_grid_shape[0] / process_grid_shape[1]; block_y = rank % (process_grid_shape[0] * process_grid_shape[1]) / process_grid_shape[0]; block_x = rank % (process_grid_shape[0] * process_grid_shape[1]) % process_grid_shape[0]; sends_first = (block_x + block_y + block_z) % 2; CudaKernelChecker kernel_checker; auto block_init_routine = [&kernel_checker, this](CudaMemory<double> &v, const char* kernel_name) { v.alloc(block_shape[0] * block_shape[1] * block_shape[2]); init_array<<<GRID_SIZE_dim3, BLOCK_SIZE_dim3>>>( v.get(), block_shape[0] * block_shape[1] * block_shape[2], u_0 ); kernel_checker.check(kernel_name); }; block_init_routine(block_d, "init block_d"); block_init_routine(prev_block_d, "init prev_block_d"); auto boundary_layer_init_routine = [this]( std::vector<double> &v_h, CudaMemory<double> &v_d, const bool layer_not_needed, const long long count ) { v_h.resize( layer_not_needed ? 0 : count); v_d.alloc( layer_not_needed ? 
0 : count); }; boundary_layer_init_routine( left_h, left_d, block_x == 0, block_shape[1] * block_shape[2] ); boundary_layer_init_routine( right_h, right_d, block_x == process_grid_shape[0] - 1, block_shape[1] * block_shape[2] ); boundary_layer_init_routine( front_h, front_d, block_y == 0, block_shape[0] * block_shape[2] ); boundary_layer_init_routine( back_h, back_d, block_y == process_grid_shape[1] - 1, block_shape[0] * block_shape[2] ); boundary_layer_init_routine( down_h, down_d, block_z == 0, block_shape[0] * block_shape[1] ); boundary_layer_init_routine( up_h, up_d, block_z == process_grid_shape[2] - 1, block_shape[0] * block_shape[1] ); timer.start(); } void Lab08::init(int argc, char **argv) { int initialized; checkMPIErrors(MPI_Initialized( &initialized )); if (!initialized) { checkMPIErrors(MPI_Init(&argc, &argv)); } } void Lab08::finalize() { int finalized; checkMPIErrors(MPI_Finalized( &finalized )); if (!finalized) { checkMPIErrors(MPI_Barrier(MPI_COMM_WORLD)); checkMPIErrors(MPI_Finalize()); } } void Lab08::rank_0_init() { // input read_in_container(process_grid_shape); read_in_container(block_shape); std::cin >> output_name; std::cin >> eps; read_in_container(l); std::cin >> boundaries.down >> boundaries.up >> boundaries.left >> boundaries.right >> boundaries.front >> boundaries.back; std::cin >> u_0; // input done // send input data to other ranks int n_ranks = process_grid_shape[0] * process_grid_shape[1] * process_grid_shape[2]; for (int rank = 1; rank < n_ranks; ++rank) { checkMPIErrors(MPI_Send( process_grid_shape.data(), process_grid_shape.size(), MPI_LONG_LONG, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( block_shape.data(), block_shape.size(), MPI_LONG_LONG, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( output_name.data(), output_name.size(), MPI_CHAR, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( &eps, 1, MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( l.data(), l.size(), MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( reinterpret_cast<double*>(&boundaries), sizeof(boundaries) / sizeof(double), MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); checkMPIErrors(MPI_Send( &u_0, 1, MPI_DOUBLE, rank, SEND_ANY_TAG, MPI_COMM_WORLD )); } } void Lab08::rank_non_0_init() { int root_rank = 0; checkMPIErrors(MPI_Recv( process_grid_shape.data(), process_grid_shape.size(), MPI_LONG_LONG, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); checkMPIErrors(MPI_Recv( block_shape.data(), block_shape.size(), MPI_LONG_LONG, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); MPI_Status status; checkMPIErrors(MPI_Probe( root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, &status )); int output_name_count; checkMPIErrors(MPI_Get_count( &status, MPI_CHAR, &output_name_count )); std::vector<char> output_name_buffer(output_name_count); checkMPIErrors(MPI_Recv( output_name_buffer.data(), output_name_buffer.size(), MPI_CHAR, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); std::copy( output_name_buffer.begin(), output_name_buffer.end(), std::back_inserter(output_name) ); checkMPIErrors(MPI_Recv( &eps, 1, MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); checkMPIErrors(MPI_Recv( l.data(), l.size(), MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); checkMPIErrors(MPI_Recv( reinterpret_cast<double*>(&boundaries), sizeof(boundaries) / sizeof(double), MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE 
)); checkMPIErrors(MPI_Recv( &u_0, 1, MPI_DOUBLE, root_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE )); } int Lab08::block_position_to_rank( long long block_x, long long block_y, long long block_z ) { if (block_x < 0 || block_y < 0 || block_z < 0) { return -1; } if ( block_x >= process_grid_shape[0] || block_y >= process_grid_shape[1] || block_z >= process_grid_shape[2] ) { return -1; } return block_x + block_y * process_grid_shape[0] + block_z * process_grid_shape[0] * process_grid_shape[1]; } void Lab08::send_boundary_layer( std::vector<double> &v_h, CudaMemory<double> &v_d, int destination_rank, Lab08::BoundaryLayerTag tag, std::vector<MPI_Request> &send_requests ) { if (destination_rank > -1) { v_d.memcpy( v_h.data(), cudaMemcpyDeviceToHost ); MPI_Request request; checkMPIErrors(MPI_Isend( v_h.data(), v_h.size(), MPI_DOUBLE, destination_rank, static_cast<int>(tag), MPI_COMM_WORLD, &request )); send_requests.push_back(request); } } void Lab08::send_boundary_layers(std::vector<MPI_Request> &send_requests) { send_boundary_layer( left_h, left_d, block_position_to_rank(block_x - 1, block_y, block_z), BoundaryLayerTag::RIGHT, send_requests ); send_boundary_layer( right_h, right_d, block_position_to_rank(block_x + 1, block_y, block_z), BoundaryLayerTag::LEFT, send_requests ); send_boundary_layer( front_h, front_d, block_position_to_rank(block_x, block_y - 1, block_z), BoundaryLayerTag::BACK, send_requests ); send_boundary_layer( back_h, back_d, block_position_to_rank(block_x, block_y + 1, block_z), BoundaryLayerTag::FRONT, send_requests ); send_boundary_layer( down_h, down_d, block_position_to_rank(block_x, block_y, block_z - 1), BoundaryLayerTag::UP, send_requests ); send_boundary_layer( up_h, up_d, block_position_to_rank(block_x, block_y, block_z + 1), BoundaryLayerTag::DOWN, send_requests ); } void Lab08::receive_boundary_layer( std::vector<double> &v, int source_rank, BoundaryLayerTag tag, std::vector<MPI_Request> &receive_requests ) { if (source_rank > -1) { MPI_Request request; checkMPIErrors(MPI_Irecv( v.data(), v.size(), MPI_DOUBLE, source_rank, static_cast<int>(tag), MPI_COMM_WORLD, &request )); receive_requests.push_back(request); } } void Lab08::receive_boundary_layers( std::vector<double> &left_h, std::vector<double> &right_h, std::vector<double> &front_h, std::vector<double> &back_h, std::vector<double> &down_h, std::vector<double> &up_h, std::vector<MPI_Request> &receive_requests ) { receive_boundary_layer( left_h, block_position_to_rank(block_x - 1, block_y, block_z), BoundaryLayerTag::LEFT, receive_requests ); receive_boundary_layer( right_h, block_position_to_rank(block_x + 1, block_y, block_z), BoundaryLayerTag::RIGHT, receive_requests ); receive_boundary_layer( front_h, block_position_to_rank(block_x, block_y - 1, block_z), BoundaryLayerTag::FRONT, receive_requests ); receive_boundary_layer( back_h, block_position_to_rank(block_x, block_y + 1, block_z), BoundaryLayerTag::BACK, receive_requests ); receive_boundary_layer( down_h, block_position_to_rank(block_x, block_y, block_z - 1), BoundaryLayerTag::DOWN, receive_requests ); receive_boundary_layer( up_h, block_position_to_rank(block_x, block_y, block_z + 1), BoundaryLayerTag::UP, receive_requests ); } __device__ long long block_shape[3]; __global__ void copy_block_to_prev_block( double *block, double *prev_block ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long 
offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; for (long long k = idz; k < block_shape[2]; k += offset_z) for (long long j = idy; j < block_shape[1]; j += offset_y) for (long long i = idx; i < block_shape[0]; i += offset_x) locate_p(prev_block, i, j, k) = locate_p(block, i, j, k); } __global__ void prev_block_to_abs_difference_array( double *block, double *prev_block ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; for (long long k = idz; k < block_shape[2]; k += offset_z) for (long long j = idy; j < block_shape[1]; j += offset_y) for (long long i = idx; i < block_shape[0]; i += offset_x) locate_p(prev_block, i, j, k) = fabs(locate_p(block, i, j, k) - locate_p(prev_block, i, j, k)); } __global__ void block_iter_process( double *block, double *prev_block, double *left, double *right, double *front, double *back, double *down, double *up, Lab08::Boundaries boundaries, double h_x_pow_minus_2, double h_y_pow_minus_2, double h_z_pow_minus_2, double denominator ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; for (long long k = idz; k < block_shape[2]; k += offset_z) for (long long j = idy; j < block_shape[1]; j += offset_y) for (long long i = idx; i < block_shape[0]; i += offset_x) { double u_left = i == 0 ? (left == nullptr ? boundaries.left : left[ j + block_shape[1] * k]) : locate_p(prev_block, i - 1, j, k), u_right = i == block_shape[0] - 1 ? (right == nullptr ? boundaries.right : right[j + block_shape[1] * k]) : locate_p(prev_block, i + 1, j, k), u_front = j == 0 ? (front == nullptr ? boundaries.front : front[i + block_shape[0] * k]) : locate_p(prev_block, i, j - 1, k), u_back = j == block_shape[1] - 1 ? (back == nullptr ? boundaries.back : back[ i + block_shape[0] * k]) : locate_p(prev_block, i, j + 1, k), u_down = k == 0 ? (down == nullptr ? boundaries.down : down[ i + block_shape[0] * j]) : locate_p(prev_block, i, j, k - 1), u_up = k == block_shape[2] - 1 ? (up == nullptr ? 
boundaries.up : up[ i + block_shape[0] * j]) : locate_p(prev_block, i, j, k + 1); locate_p(block, i, j, k) = (u_left + u_right) * h_x_pow_minus_2; locate_p(block, i, j, k) += (u_front + u_back ) * h_y_pow_minus_2; locate_p(block, i, j, k) += (u_down + u_up ) * h_z_pow_minus_2; locate_p(block, i, j, k) /= denominator; } } __global__ void init_boundary_layers( double *block, double *left, double *right, double *front, double *back, double *down, double *up ) { const long long idx = blockDim.x * blockIdx.x + threadIdx.x; const long long idy = blockDim.y * blockIdx.y + threadIdx.y; const long long idz = blockDim.z * blockIdx.z + threadIdx.z; const long long offset_x = blockDim.x * gridDim.x; const long long offset_y = blockDim.y * gridDim.y; const long long offset_z = blockDim.z * gridDim.z; #define fill_boundary_layer(v, outer, inner, outer_start, inner_start, outer_offset, inner_offset, loc) \ { \ if (v) \ { \ for (long long i = outer_start; i < block_shape[outer]; i += outer_offset) \ for (long long j = inner_start; j < block_shape[inner]; j += inner_offset) \ v[i * block_shape[inner] + j] = loc; \ } \ } \ if (idx == 0) fill_boundary_layer(left , 2, 1, idz, idy, offset_z, offset_y, locate_p(block, 0, j, i)); if (idx == 1) fill_boundary_layer(right, 2, 1, idz, idy, offset_z, offset_y, locate_p(block, block_shape[0] - 1, j, i)); if (idy == 0) fill_boundary_layer(front, 2, 0, idz, idx, offset_z, offset_x, locate_p(block, j, 0, i)); if (idy == 1) fill_boundary_layer(back , 2, 0, idz, idx, offset_z, offset_x, locate_p(block, j, block_shape[1] - 1, i)); if (idz == 0) fill_boundary_layer(down , 1, 0, idy, idx, offset_y, offset_x, locate_p(block, j, i, 0)); if (idz == 1) fill_boundary_layer(up , 1, 0, idy, idx, offset_y, offset_x, locate_p(block, j, i, block_shape[2] - 1)); #undef fill_boundary_layer } void Lab08::copy_boundary_layers_to_device( std::vector<double> &left_h, std::vector<double> &right_h, std::vector<double> &front_h, std::vector<double> &back_h, std::vector<double> &down_h, std::vector<double> &up_h ) { auto copy_boundary_layer_to_device = []( std::vector<double> &v_h, CudaMemory<double> &v_d ) { v_d.memcpy(v_h.data(), cudaMemcpyHostToDevice); }; copy_boundary_layer_to_device(left_h, left_d); copy_boundary_layer_to_device(right_h, right_d); copy_boundary_layer_to_device(front_h, front_d); copy_boundary_layer_to_device(back_h, back_d); copy_boundary_layer_to_device(down_h, down_d); copy_boundary_layer_to_device(up_h, up_d); } void Lab08::solve() { std::vector<double> left_h (block_x == 0 ? 0 : block_shape[1] * block_shape[2]), right_h(block_x == process_grid_shape[0] - 1 ? 0 : block_shape[1] * block_shape[2]), front_h(block_y == 0 ? 0 : block_shape[0] * block_shape[2]), back_h (block_y == process_grid_shape[1] - 1 ? 0 : block_shape[0] * block_shape[2]), down_h (block_z == 0 ? 0 : block_shape[0] * block_shape[1]), up_h (block_z == process_grid_shape[2] - 1 ? 
0 : block_shape[0] * block_shape[1]); double n_x = block_shape[0] * process_grid_shape[0], n_y = block_shape[1] * process_grid_shape[1], n_z = block_shape[2] * process_grid_shape[2]; double h_x_pow_minus_2 = n_x * n_x / l[0] / l[0], h_y_pow_minus_2 = n_y * n_y / l[1] / l[1], h_z_pow_minus_2 = n_z * n_z / l[2] / l[2], denominator = 2 * (h_x_pow_minus_2 + h_y_pow_minus_2 + h_z_pow_minus_2); std::vector<MPI_Request> send_requests, receive_requests; checkCudaErrors(cudaMemcpyToSymbol( ::block_shape, block_shape.data(), block_shape.size() * sizeof(decltype(block_shape[0])), 0, cudaMemcpyHostToDevice )); thrust::device_ptr<double> abs_difference_array = thrust::device_pointer_cast(prev_block_d.get()); while (true) { CudaKernelChecker checker; init_boundary_layers<<<GRID_SIZE_dim3, BLOCK_SIZE_dim3>>>( block_d.get(), left_d.get(), right_d.get(), front_d.get(), back_d.get(), down_d.get(), up_d.get() ); checker.check("init_boundary_layers"); if (sends_first) { send_boundary_layers(send_requests); checkMPIErrors(MPI_Waitall( send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE )); send_requests.clear(); receive_boundary_layers( left_h, right_h, front_h, back_h, down_h, up_h, receive_requests ); checkMPIErrors(MPI_Waitall( receive_requests.size(), receive_requests.data(), MPI_STATUSES_IGNORE )); receive_requests.clear(); } else { receive_boundary_layers( left_h, right_h, front_h, back_h, down_h, up_h, receive_requests ); checkMPIErrors(MPI_Waitall( receive_requests.size(), receive_requests.data(), MPI_STATUSES_IGNORE )); receive_requests.clear(); send_boundary_layers(send_requests); checkMPIErrors(MPI_Waitall( send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE )); send_requests.clear(); } copy_boundary_layers_to_device( left_h, right_h, front_h, back_h, down_h, up_h ); copy_block_to_prev_block<<<GRID_SIZE_dim3, BLOCK_SIZE_dim3>>>( block_d.get(), prev_block_d.get() ); checker.check("copy_block_to_prev_block"); block_iter_process<<<GRID_SIZE_dim3, BLOCK_SIZE_dim3>>>( block_d.get(), prev_block_d.get(), left_d.get(), right_d.get(), front_d.get(), back_d.get(), down_d.get(), up_d.get(), boundaries, h_x_pow_minus_2, h_y_pow_minus_2, h_z_pow_minus_2, denominator ); checker.check("iter process kernel"); prev_block_to_abs_difference_array<<<GRID_SIZE_dim3, BLOCK_SIZE_dim3>>>( block_d.get(), prev_block_d.get() ); checker.check("prev_block_to_abs_difference_array"); double max_abs_difference = *thrust::max_element( abs_difference_array, abs_difference_array + prev_block_d.count ); double total_max_abs_difference; checkMPIErrors(MPI_Allreduce( &max_abs_difference, &total_max_abs_difference, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD )); if (total_max_abs_difference < eps) break; } } void Lab08::write_answer() { timer.stop(); if (rank == 0) timer.print_time(); MPI_File file; int delete_error = MPI_File_delete(output_name.c_str(), MPI_INFO_NULL); if (delete_error != 0 && delete_error != MPI_ERR_NO_SUCH_FILE) checkMPIErrors(delete_error); checkMPIErrors(MPI_File_open( MPI_COMM_WORLD, output_name.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file )); // create type MPI_Datatype Number; const int number_chars_count = 16; // 0 . 
000000 e+000 ' ' checkMPIErrors(MPI_Type_contiguous( number_chars_count, MPI_CHAR, &Number )); MPI_Datatype BlockRow; checkMPIErrors(MPI_Type_contiguous( block_shape[0], Number, &BlockRow )); MPI_Datatype BlockPlane; std::vector<int> BlockPlane_blocklengths; std::vector<int> BlockPlane_displacements; for (size_t i = 0; i < block_shape[1]; ++i) { BlockPlane_blocklengths.push_back(1); BlockPlane_displacements.push_back(i * process_grid_shape[0]); } checkMPIErrors(MPI_Type_create_hvector(block_shape[1], 1, process_grid_shape[0] * block_shape[0] * number_chars_count, BlockRow, &BlockPlane)); /* checkMPIErrors(MPI_Type_indexed( */ /* block_shape[1], */ /* BlockPlane_blocklengths.data(), */ /* BlockPlane_displacements.data(), */ /* BlockRow, */ /* &BlockPlane */ /* )); */ MPI_Datatype Block; std::vector<int> Block_blocklengths; std::vector<int> Block_displacements; for (size_t i = 0; i < block_shape[2]; ++i) { Block_blocklengths.push_back(1); Block_displacements.push_back(i * process_grid_shape[1]); } checkMPIErrors(MPI_Type_create_hvector(block_shape[2], 1, process_grid_shape[1] * block_shape[0] * number_chars_count * process_grid_shape[0] * block_shape[1], BlockPlane, &Block)); /* checkMPIErrors(MPI_Type_indexed( */ /* block_shape[2], */ /* Block_blocklengths.data(), */ /* Block_displacements.data(), */ /* BlockPlane, */ /* &Block */ /* )); */ checkMPIErrors(MPI_Type_commit(&Block)); // set view with created type MPI_Offset offset = 0; offset += block_shape[0] * number_chars_count * block_x; offset += block_shape[0] * block_shape[1] * process_grid_shape[0] * number_chars_count * block_y; offset += block_shape[0] * block_shape[1] * block_shape[2] * process_grid_shape[0] * process_grid_shape[1] * number_chars_count * block_z; checkMPIErrors(MPI_File_set_view( file, offset, MPI_CHAR, Block, "native", MPI_INFO_NULL )); // create buffer with data to write std::string buffer; size_t buffer_pos = 0; block_h.resize(block_d.count); block_d.memcpy(block_h.data(), cudaMemcpyDeviceToHost); /* for (size_t i = 0; i < block_h.size(); ++i) */ /* { */ /* std::cout << i << std::endl; */ /* block_h[i] = i + block_h.size() * rank; */ /* } */ for (long long k = 0; k < block_shape[2]; ++k) for (long long j = 0; j < block_shape[1]; ++j) for (long long i = 0; i < block_shape[0]; ++i) { buffer.resize(buffer_pos + number_chars_count); sprintf(&buffer[buffer_pos], "%-16e", locate(i, j, k)); buffer_pos += number_chars_count; if (block_x == process_grid_shape[0] - 1 && i == block_shape[0] - 1) { buffer[buffer_pos - 1] = '\n'; if (j == block_shape[1] - 1 && block_y == process_grid_shape[1] - 1 && (block_z != process_grid_shape[2] - 1 || k != block_shape[2] - 1)) buffer[buffer_pos - 2] = '\n'; } } /* buffer[0] = 'R'; */ /* buffer[1] = 'a'; */ /* buffer[2] = 'n'; */ /* buffer[3] = 'k'; */ /* buffer[4] = ' '; */ /* buffer[5] = '0' + rank; */ /* if (rank == 0) { */ /* std::cout << buffer << std::endl; */ /* for (int i = 1; i < 8; i++) { */ /* MPI_Recv(&buffer[0], buffer.size(), MPI_CHAR, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); */ /* std::cout << std::endl << buffer << std::endl; */ /* } */ /* } else { */ /* MPI_Send(&buffer[0], buffer.size(), MPI_CHAR, 0, 0, MPI_COMM_WORLD); */ /* } */ // write data from buffer checkMPIErrors(MPI_File_write_all( file, buffer.data(), buffer.size(), MPI_CHAR, MPI_STATUS_IGNORE )); // close file checkMPIErrors(MPI_File_close(&file)); }
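Every kernel in the lab08 pair above walks its local block with the same 3D grid-stride pattern, so any grid/block geometry covers the whole volume. The sketch below isolates that pattern in a self-contained CUDA program; the kernel name fill3d and the 8x8x8 sizes are illustrative and not taken from the original file, but the index layout matches the locate_p macro.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill3d(double *v, int nx, int ny, int nz, double value)
{
    // Start at this thread's global (i, j, k) index and stride by the total
    // number of threads in each dimension, as copy_block_to_prev_block does.
    for (int k = blockIdx.z * blockDim.z + threadIdx.z; k < nz; k += blockDim.z * gridDim.z)
        for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < ny; j += blockDim.y * gridDim.y)
            for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nx; i += blockDim.x * gridDim.x)
                v[i + nx * (j + (long long)ny * k)] = value;   // i-fastest layout, like locate_p
}

int main()
{
    const int nx = 8, ny = 8, nz = 8;
    double *d_v = nullptr;
    cudaMalloc(&d_v, sizeof(double) * nx * ny * nz);
    fill3d<<<dim3(2, 2, 2), dim3(4, 4, 4)>>>(d_v, nx, ny, nz, 1.0);
    cudaDeviceSynchronize();
    printf("fill3d: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_v);
    return 0;
}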
21a2390523ec5438ddf21ad9d91b04ae044ec944.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/device_mdarray.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/thrust_policy.hpp> #include <raft/matrix/copy.cuh> #include <raft/random/rng.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> namespace raft { namespace matrix { template <typename T> struct MatrixInputs { T tolerance; int n_row; int n_col; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MatrixInputs<T>& dims) { return os; } template <typename T> class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> { public: MatrixTest() : params(::testing::TestWithParam<MatrixInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), in1(params.n_row * params.n_col, stream), in2(params.n_row * params.n_col, stream), in1_revr(params.n_row * params.n_col, stream) { } protected: void SetUp() override { raft::random::RngState r(params.seed); int len = params.n_row * params.n_col; uniform(handle, r, in1.data(), len, T(-1.0), T(1.0)); auto in1_view = raft::make_device_matrix_view<const T, int, col_major>( in1.data(), params.n_row, params.n_col); auto in2_view = raft::make_device_matrix_view<T, int, col_major>(in2.data(), params.n_row, params.n_col); copy<T, int>(handle, in1_view, in2_view); // copy(in1, in1_revr, params.n_row, params.n_col); // colReverse(in1_revr, params.n_row, params.n_col); rmm::device_uvector<T> outTrunc(6, stream); auto out_trunc_view = raft::make_device_matrix_view<T, int, col_major>(outTrunc.data(), 3, 2); trunc_zero_origin<T, int>(handle, in1_view, out_trunc_view); resource::sync_stream(handle, stream); } protected: raft::resources handle; hipStream_t stream; MatrixInputs<T> params; rmm::device_uvector<T> in1, in2, in1_revr; }; const std::vector<MatrixInputs<float>> inputsf2 = {{0.000001f, 4, 4, 1234ULL}}; const std::vector<MatrixInputs<double>> inputsd2 = {{0.00000001, 4, 4, 1234ULL}}; typedef MatrixTest<float> MatrixTestF; TEST_P(MatrixTestF, Result) { ASSERT_TRUE(raft::devArrMatch(in1.data(), in2.data(), params.n_row * params.n_col, raft::CompareApprox<float>(params.tolerance), stream)); } typedef MatrixTest<double> MatrixTestD; TEST_P(MatrixTestD, Result) { ASSERT_TRUE(raft::devArrMatch(in1.data(), in2.data(), params.n_row * params.n_col, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestD, ::testing::ValuesIn(inputsd2)); template <typename T> class MatrixCopyRowsTest : public ::testing::Test { using math_t = typename std::tuple_element<0, T>::type; using idx_t = typename std::tuple_element<1, T>::type; using idx_array_t = 
typename std::tuple_element<2, T>::type; protected: MatrixCopyRowsTest() : stream(resource::get_cuda_stream(handle)), input(n_cols * n_rows, resource::get_cuda_stream(handle)), indices(n_selected, resource::get_cuda_stream(handle)), output(n_cols * n_selected, resource::get_cuda_stream(handle)) { raft::update_device(indices.data(), indices_host, n_selected, stream); // Init input array thrust::counting_iterator<idx_t> first(0); thrust::device_ptr<math_t> ptr(input.data()); thrust::copy(resource::get_thrust_policy(handle), first, first + n_cols * n_rows, ptr); } void testCopyRows() { auto input_view = raft::make_device_matrix_view<const math_t, idx_array_t, col_major>( input.data(), n_rows, n_cols); auto output_view = raft::make_device_matrix_view<math_t, idx_array_t, col_major>( output.data(), n_selected, n_cols); auto indices_view = raft::make_device_vector_view<const idx_array_t, idx_array_t>(indices.data(), n_selected); raft::matrix::copy_rows(handle, input_view, output_view, indices_view); EXPECT_TRUE(raft::devArrMatchHost( output_exp_colmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>(), stream)); auto input_row_view = raft::make_device_matrix_view<const math_t, idx_array_t, row_major>( input.data(), n_rows, n_cols); auto output_row_view = raft::make_device_matrix_view<math_t, idx_array_t, row_major>( output.data(), n_selected, n_cols); raft::matrix::copy_rows(handle, input_row_view, output_row_view, indices_view); EXPECT_TRUE(raft::devArrMatchHost( output_exp_rowmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>(), stream)); } protected: raft::resources handle; hipStream_t stream; int n_rows = 10; int n_cols = 3; int n_selected = 5; idx_array_t indices_host[5] = {0, 3, 4, 7, 9}; math_t output_exp_colmajor[15] = {0, 3, 4, 7, 9, 10, 13, 14, 17, 19, 20, 23, 24, 27, 29}; math_t output_exp_rowmajor[15] = {0, 1, 2, 9, 10, 11, 12, 13, 14, 21, 22, 23, 27, 28, 29}; rmm::device_uvector<math_t> input; rmm::device_uvector<math_t> output; rmm::device_uvector<idx_array_t> indices; }; using TypeTuple = ::testing::Types<std::tuple<float, int, int>, std::tuple<float, int64_t, int>, std::tuple<double, int, int>, std::tuple<double, int64_t, int>>; TYPED_TEST_CASE(MatrixCopyRowsTest, TypeTuple); TYPED_TEST(MatrixCopyRowsTest, CopyRows) { this->testCopyRows(); } } // namespace matrix } // namespace raft
21a2390523ec5438ddf21ad9d91b04ae044ec944.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/device_mdarray.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/thrust_policy.hpp> #include <raft/matrix/copy.cuh> #include <raft/random/rng.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> namespace raft { namespace matrix { template <typename T> struct MatrixInputs { T tolerance; int n_row; int n_col; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MatrixInputs<T>& dims) { return os; } template <typename T> class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> { public: MatrixTest() : params(::testing::TestWithParam<MatrixInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), in1(params.n_row * params.n_col, stream), in2(params.n_row * params.n_col, stream), in1_revr(params.n_row * params.n_col, stream) { } protected: void SetUp() override { raft::random::RngState r(params.seed); int len = params.n_row * params.n_col; uniform(handle, r, in1.data(), len, T(-1.0), T(1.0)); auto in1_view = raft::make_device_matrix_view<const T, int, col_major>( in1.data(), params.n_row, params.n_col); auto in2_view = raft::make_device_matrix_view<T, int, col_major>(in2.data(), params.n_row, params.n_col); copy<T, int>(handle, in1_view, in2_view); // copy(in1, in1_revr, params.n_row, params.n_col); // colReverse(in1_revr, params.n_row, params.n_col); rmm::device_uvector<T> outTrunc(6, stream); auto out_trunc_view = raft::make_device_matrix_view<T, int, col_major>(outTrunc.data(), 3, 2); trunc_zero_origin<T, int>(handle, in1_view, out_trunc_view); resource::sync_stream(handle, stream); } protected: raft::resources handle; cudaStream_t stream; MatrixInputs<T> params; rmm::device_uvector<T> in1, in2, in1_revr; }; const std::vector<MatrixInputs<float>> inputsf2 = {{0.000001f, 4, 4, 1234ULL}}; const std::vector<MatrixInputs<double>> inputsd2 = {{0.00000001, 4, 4, 1234ULL}}; typedef MatrixTest<float> MatrixTestF; TEST_P(MatrixTestF, Result) { ASSERT_TRUE(raft::devArrMatch(in1.data(), in2.data(), params.n_row * params.n_col, raft::CompareApprox<float>(params.tolerance), stream)); } typedef MatrixTest<double> MatrixTestD; TEST_P(MatrixTestD, Result) { ASSERT_TRUE(raft::devArrMatch(in1.data(), in2.data(), params.n_row * params.n_col, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestD, ::testing::ValuesIn(inputsd2)); template <typename T> class MatrixCopyRowsTest : public ::testing::Test { using math_t = typename std::tuple_element<0, T>::type; using idx_t = typename std::tuple_element<1, T>::type; using idx_array_t = typename std::tuple_element<2, T>::type; protected: 
MatrixCopyRowsTest() : stream(resource::get_cuda_stream(handle)), input(n_cols * n_rows, resource::get_cuda_stream(handle)), indices(n_selected, resource::get_cuda_stream(handle)), output(n_cols * n_selected, resource::get_cuda_stream(handle)) { raft::update_device(indices.data(), indices_host, n_selected, stream); // Init input array thrust::counting_iterator<idx_t> first(0); thrust::device_ptr<math_t> ptr(input.data()); thrust::copy(resource::get_thrust_policy(handle), first, first + n_cols * n_rows, ptr); } void testCopyRows() { auto input_view = raft::make_device_matrix_view<const math_t, idx_array_t, col_major>( input.data(), n_rows, n_cols); auto output_view = raft::make_device_matrix_view<math_t, idx_array_t, col_major>( output.data(), n_selected, n_cols); auto indices_view = raft::make_device_vector_view<const idx_array_t, idx_array_t>(indices.data(), n_selected); raft::matrix::copy_rows(handle, input_view, output_view, indices_view); EXPECT_TRUE(raft::devArrMatchHost( output_exp_colmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>(), stream)); auto input_row_view = raft::make_device_matrix_view<const math_t, idx_array_t, row_major>( input.data(), n_rows, n_cols); auto output_row_view = raft::make_device_matrix_view<math_t, idx_array_t, row_major>( output.data(), n_selected, n_cols); raft::matrix::copy_rows(handle, input_row_view, output_row_view, indices_view); EXPECT_TRUE(raft::devArrMatchHost( output_exp_rowmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>(), stream)); } protected: raft::resources handle; cudaStream_t stream; int n_rows = 10; int n_cols = 3; int n_selected = 5; idx_array_t indices_host[5] = {0, 3, 4, 7, 9}; math_t output_exp_colmajor[15] = {0, 3, 4, 7, 9, 10, 13, 14, 17, 19, 20, 23, 24, 27, 29}; math_t output_exp_rowmajor[15] = {0, 1, 2, 9, 10, 11, 12, 13, 14, 21, 22, 23, 27, 28, 29}; rmm::device_uvector<math_t> input; rmm::device_uvector<math_t> output; rmm::device_uvector<idx_array_t> indices; }; using TypeTuple = ::testing::Types<std::tuple<float, int, int>, std::tuple<float, int64_t, int>, std::tuple<double, int, int>, std::tuple<double, int64_t, int>>; TYPED_TEST_CASE(MatrixCopyRowsTest, TypeTuple); TYPED_TEST(MatrixCopyRowsTest, CopyRows) { this->testCopyRows(); } } // namespace matrix } // namespace raft
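The expected arrays in MatrixCopyRowsTest can be reproduced on the host: for a column-major 10x3 matrix filled with 0..29, gathering rows {0, 3, 4, 7, 9} keeps the column stride of 10. A small host-only check, independent of RAFT; all names below are local to this sketch.
#include <cstdio>

int main()
{
    const int n_rows = 10, n_cols = 3, n_selected = 5;
    const int indices[n_selected] = {0, 3, 4, 7, 9};

    // Column-major fill 0..29, as the counting iterator does in the test.
    int input[n_rows * n_cols];
    for (int i = 0; i < n_rows * n_cols; ++i) input[i] = i;

    // Gather the selected rows, keeping column-major layout in the output.
    int output[n_selected * n_cols];
    for (int c = 0; c < n_cols; ++c)
        for (int r = 0; r < n_selected; ++r)
            output[r + c * n_selected] = input[indices[r] + c * n_rows];

    // Prints: 0 3 4 7 9 10 13 14 17 19 20 23 24 27 29, i.e. output_exp_colmajor.
    for (int i = 0; i < n_selected * n_cols; ++i) printf("%d ", output[i]);
    printf("\n");
    return 0;
}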
2b375ca1064061439fdc87fb32d664cc9434d26e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "utils.h" static void swap_boards(bboard** a, bboard** b) { bboard* t; t = *a; *a = *b; *b = t; } static inline void cudaCheckErrors(const char msg[], const char file[], int line) { do { hipError_t __err = hipGetLastError(); if (__err != hipSuccess) { fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", msg, hipGetErrorString(__err), file, line); hipDeviceReset(); exit(1); } } while (0); } #define DEFAULT_OPTX 256 #define DEFAULT_OPTY 1 void best_block_size(int* optx, int* opty) { #ifdef CUDA_OCC_OPT // The launch configurator returned block size int block_size = 0; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int min_grid_size = 0; hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, (void*)calculate_next_generation, 0, 0); *optx = block_size; *opty = 1; #else *optx = DEFAULT_OPTX; *opty = DEFAULT_OPTY; #endif } #ifndef TESTING int main(int argc, char** argv) { if (argc < 3) { printf("usage: %s fname dim (iter blockx blocky gridx gridy)\n", argv[0]); exit(1); } int n_runs = DFL_RUNS; if (argc >= 4) { n_runs = atoi(argv[3]); } const int dim = atoi(argv[2]); const size_t total_elements = dim * dim; const size_t mem_size = total_elements * sizeof(int); dim3 block; dim3 grid; const int dim_board_h = CEIL_DIV(dim, HEIGHT); const int dim_board_w = CEIL_DIV(dim, WIDTH); if (argc >= 7) { block.x = atoi(argv[4]); block.y = atoi(argv[5]); grid.x = atoi(argv[6]); grid.y = atoi(argv[7]); } else { int optx, opty; best_block_size(&optx, &opty); fprintf(stderr, "opt=%d %d\n", optx, opty); block.x = optx; block.y = opty; grid.x = CEIL_DIV(dim_board_h, block.x); grid.y = CEIL_DIV(dim_board_w, block.y); } hipFree(0); // init device const int remaining_dim_h = grid.x * block.x * HEIGHT - dim; const int remaining_dim_w = grid.y * block.y * WIDTH - dim; const int remaining_cells_h = remaining_dim_h % HEIGHT; const int remaining_cells_w = remaining_dim_w % WIDTH; char* filename = argv[1]; fprintf(stderr, "%s: Reading %dx%d table from file %s\n", argv[0], dim, dim, filename); fprintf(stderr, "%s: Running on a grid(%d, %d) with a block(%d, %d):\nFilename: %s with dim %d for %d iterations\n", argv[0], grid.x, grid.y, block.x, block.y, filename, dim, n_runs); int* table; table = (int*) malloc(mem_size); read_from_file(table, filename, dim); int* d_table; hipMalloc((void**) &d_table, mem_size); cudaCheckErrors("device allocation of GOL matrix failed", __FILE__, __LINE__); bboard* d_board; bboard* d_help; size_t pitch; hipMallocPitch((void**)&d_board, &pitch, dim_board_w * sizeof(bboard), dim_board_h); cudaCheckErrors("device pitch allocation of GOL matrix failed", __FILE__, __LINE__); hipMallocPitch((void**)&d_help, &pitch, dim_board_w * sizeof(bboard), dim_board_h); cudaCheckErrors("device pitch allocation of GOL matrix failed", __FILE__, __LINE__); hipMemcpy(d_table, table, mem_size, hipMemcpyHostToDevice); cudaCheckErrors("copy from host to device memory failed", __FILE__, __LINE__); free(table); hipLaunchKernelGGL(( convert_to_tiled) , dim3(grid), dim3(block) , 0, 0, d_table, d_board, dim, pitch); cudaCheckErrors("convert_to_tiled failed", __FILE__, __LINE__); // start timewatch float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for (int i = 0; i < n_runs; ++i) { hipLaunchKernelGGL(( calculate_next_generation) , dim3(grid), dim3(block), (block.x + 
2) * sizeof(bboard) , 0, d_board, d_help, dim, dim_board_w, dim_board_h, pitch, remaining_cells_w, remaining_cells_h ); cudaCheckErrors("calculating next generation failed", __FILE__, __LINE__); swap_boards(&d_board, &d_help); } hipStreamSynchronize(0); // end timewatch hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("%f\n", time); hipLaunchKernelGGL(( convert_from_tiled) , dim3(grid), dim3(block) , 0, 0, d_table, d_board, dim, pitch); cudaCheckErrors("convert_from_tiled failed", __FILE__, __LINE__); table = (int*) malloc(mem_size); hipMemcpy(table, d_table, mem_size, hipMemcpyDeviceToHost); cudaCheckErrors("copy from device to host memory failed", __FILE__, __LINE__); // print_table(table, dim); save_table(table, dim, "test_results.bin"); // reset gpu hipDeviceReset(); return 0; } #endif
2b375ca1064061439fdc87fb32d664cc9434d26e.cu
#include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <cuda_runtime.h> #include "utils.h" static void swap_boards(bboard** a, bboard** b) { bboard* t; t = *a; *a = *b; *b = t; } static inline void cudaCheckErrors(const char msg[], const char file[], int line) { do { cudaError_t __err = cudaGetLastError(); if (__err != cudaSuccess) { fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", msg, cudaGetErrorString(__err), file, line); cudaDeviceReset(); exit(1); } } while (0); } #define DEFAULT_OPTX 256 #define DEFAULT_OPTY 1 void best_block_size(int* optx, int* opty) { #ifdef CUDA_OCC_OPT // The launch configurator returned block size int block_size = 0; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int min_grid_size = 0; cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, (void*)calculate_next_generation, 0, 0); *optx = block_size; *opty = 1; #else *optx = DEFAULT_OPTX; *opty = DEFAULT_OPTY; #endif } #ifndef TESTING int main(int argc, char** argv) { if (argc < 3) { printf("usage: %s fname dim (iter blockx blocky gridx gridy)\n", argv[0]); exit(1); } int n_runs = DFL_RUNS; if (argc >= 4) { n_runs = atoi(argv[3]); } const int dim = atoi(argv[2]); const size_t total_elements = dim * dim; const size_t mem_size = total_elements * sizeof(int); dim3 block; dim3 grid; const int dim_board_h = CEIL_DIV(dim, HEIGHT); const int dim_board_w = CEIL_DIV(dim, WIDTH); if (argc >= 7) { block.x = atoi(argv[4]); block.y = atoi(argv[5]); grid.x = atoi(argv[6]); grid.y = atoi(argv[7]); } else { int optx, opty; best_block_size(&optx, &opty); fprintf(stderr, "opt=%d %d\n", optx, opty); block.x = optx; block.y = opty; grid.x = CEIL_DIV(dim_board_h, block.x); grid.y = CEIL_DIV(dim_board_w, block.y); } cudaFree(0); // init device const int remaining_dim_h = grid.x * block.x * HEIGHT - dim; const int remaining_dim_w = grid.y * block.y * WIDTH - dim; const int remaining_cells_h = remaining_dim_h % HEIGHT; const int remaining_cells_w = remaining_dim_w % WIDTH; char* filename = argv[1]; fprintf(stderr, "%s: Reading %dx%d table from file %s\n", argv[0], dim, dim, filename); fprintf(stderr, "%s: Running on a grid(%d, %d) with a block(%d, %d):\nFilename: %s with dim %d for %d iterations\n", argv[0], grid.x, grid.y, block.x, block.y, filename, dim, n_runs); int* table; table = (int*) malloc(mem_size); read_from_file(table, filename, dim); int* d_table; cudaMalloc((void**) &d_table, mem_size); cudaCheckErrors("device allocation of GOL matrix failed", __FILE__, __LINE__); bboard* d_board; bboard* d_help; size_t pitch; cudaMallocPitch((void**)&d_board, &pitch, dim_board_w * sizeof(bboard), dim_board_h); cudaCheckErrors("device pitch allocation of GOL matrix failed", __FILE__, __LINE__); cudaMallocPitch((void**)&d_help, &pitch, dim_board_w * sizeof(bboard), dim_board_h); cudaCheckErrors("device pitch allocation of GOL matrix failed", __FILE__, __LINE__); cudaMemcpy(d_table, table, mem_size, cudaMemcpyHostToDevice); cudaCheckErrors("copy from host to device memory failed", __FILE__, __LINE__); free(table); convert_to_tiled <<< grid, block >>> (d_table, d_board, dim, pitch); cudaCheckErrors("convert_to_tiled failed", __FILE__, __LINE__); // start timewatch float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for (int i = 0; i < n_runs; ++i) { calculate_next_generation <<< grid, block, (block.x + 2) * sizeof(bboard) >>> (d_board, d_help, dim, dim_board_w, dim_board_h, pitch, remaining_cells_w, 
remaining_cells_h ); cudaCheckErrors("calculating next generation failed", __FILE__, __LINE__); swap_boards(&d_board, &d_help); } cudaStreamSynchronize(0); // end timewatch cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("%f\n", time); convert_from_tiled <<< grid, block >>> (d_table, d_board, dim, pitch); cudaCheckErrors("convert_from_tiled failed", __FILE__, __LINE__); table = (int*) malloc(mem_size); cudaMemcpy(table, d_table, mem_size, cudaMemcpyDeviceToHost); cudaCheckErrors("copy from device to host memory failed", __FILE__, __LINE__); // print_table(table, dim); save_table(table, dim, "test_results.bin"); // reset gpu cudaDeviceReset(); return 0; } #endif
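The timing in main() above follows the standard cudaEvent pattern: record an event before and after the work on stream 0, synchronize on the stop event, then read the elapsed milliseconds. The sketch below shows that pattern in isolation; the dummy kernel and the loop count stand in for the real generation loop and are not part of the original program.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy() {}

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);       // mark the beginning of the timed region on stream 0
    for (int i = 0; i < 100; ++i)
        dummy<<<1, 32>>>();          // stand-in for the iteration kernels
    cudaEventRecord(stop, 0);        // mark the end of the timed region
    cudaEventSynchronize(stop);      // wait until the stop event has actually occurred

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("%f\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();
    return 0;
}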
7585ae37db845c3d72867067b861aa6c2cf3e14f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { /////////////////////////////////////////////////////////////////////// template<typename T> __device__ void concatKernelHStack(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo) { // we expect all data coming in as vectors, and z as 2D matrix // the only significant difference here is the fact that input lengths might be different auto z = reinterpret_cast<T *>(vz); auto inputShapes = (Nd4jLong **) inputShapeInfos; T **input = (T **) data; __shared__ int inputEWS; __shared__ int resultEWS; __shared__ int inputLength; if (threadIdx.x == 0) { resultEWS = shape::elementWiseStride(zShapeInfo); } __syncthreads(); for (int r = blockIdx.x; r < numArrays; r += gridDim.x) { __shared__ int baseIdx; if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(inputShapes[f]); } } __syncthreads(); T *inputData = (T *) input[r]; if (threadIdx.x == 0) { inputEWS = shape::elementWiseStride(inputShapes[r]); inputLength = shape::length(inputShapes[r]); } __syncthreads(); for (int i = threadIdx.x; i < inputLength; i += blockDim.x) { z[baseIdx + i * resultEWS] = inputData[i * inputEWS]; } __syncthreads(); } } /////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execConcatKernelHStack(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo) { concatKernelHStack<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////////// template<typename T> __host__ void concatKernelHStackGeneric(dim3 &launchDims, hipStream_t *stream, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo) { hipLaunchKernelGGL(( execConcatKernelHStack<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, numArrays, data, inputShapeInfos, vz, zShapeInfo); sd::DebugHelper::checkErrorCode(stream, "concatHStack(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL concatKernelHStackGeneric, (dim3 & launchDims, hipStream_t * stream, int numArrays, Nd4jPointer * data, Nd4jPointer * inputShapeInfos, void * vz, Nd4jLong * zShapeInfo), LIBND4J_TYPES); }
7585ae37db845c3d72867067b861aa6c2cf3e14f.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { /////////////////////////////////////////////////////////////////////// template<typename T> __device__ void concatKernelHStack(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo) { // we expect all data coming in as vectors, and z as 2D matrix // the only significant difference here is the fact that input lengths might be different auto z = reinterpret_cast<T *>(vz); auto inputShapes = (Nd4jLong **) inputShapeInfos; T **input = (T **) data; __shared__ int inputEWS; __shared__ int resultEWS; __shared__ int inputLength; if (threadIdx.x == 0) { resultEWS = shape::elementWiseStride(zShapeInfo); } __syncthreads(); for (int r = blockIdx.x; r < numArrays; r += gridDim.x) { __shared__ int baseIdx; if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(inputShapes[f]); } } __syncthreads(); T *inputData = (T *) input[r]; if (threadIdx.x == 0) { inputEWS = shape::elementWiseStride(inputShapes[r]); inputLength = shape::length(inputShapes[r]); } __syncthreads(); for (int i = threadIdx.x; i < inputLength; i += blockDim.x) { z[baseIdx + i * resultEWS] = inputData[i * inputEWS]; } __syncthreads(); } } /////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execConcatKernelHStack(int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo) { concatKernelHStack<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////////// template<typename T> __host__ void concatKernelHStackGeneric(dim3 &launchDims, cudaStream_t *stream, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, void *vz, Nd4jLong *zShapeInfo) { execConcatKernelHStack<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(numArrays, data, inputShapeInfos, vz, zShapeInfo); sd::DebugHelper::checkErrorCode(stream, "concatHStack(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL concatKernelHStackGeneric, (dim3 & launchDims, cudaStream_t * stream, int numArrays, Nd4jPointer * data, Nd4jPointer * inputShapeInfos, void * vz, Nd4jLong * zShapeInfo), LIBND4J_TYPES); }
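At its core, concatKernelHStack above performs a strided copy: input element i*inputEWS lands at output element baseIdx + i*resultEWS. The reduced CUDA sketch below shows that access pattern for a single vector; the kernel name, sizes, and strides are illustrative and not taken from libnd4j.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void strided_copy(const float *in, int in_stride,
                             float *out, int out_stride, int base, int n)
{
    // Same inner-loop pattern as concatKernelHStack for one input vector.
    for (int i = threadIdx.x; i < n; i += blockDim.x)
        out[base + i * out_stride] = in[i * in_stride];
}

int main()
{
    const int n = 8;
    float h_in[n], h_out[2 * n];
    for (int i = 0; i < n; ++i) h_in[i] = (float)i;

    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, 2 * n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    // Concatenate the vector with itself: copy it to offsets 0 and n, unit strides.
    strided_copy<<<1, 32>>>(d_in, 1, d_out, 1, 0, n);
    strided_copy<<<1, 32>>>(d_in, 1, d_out, 1, n, n);

    cudaMemcpy(h_out, d_out, 2 * n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 2 * n; ++i) printf("%g ", h_out[i]);
    printf("\n");

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}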
c3188cd3f334ac6d986d1f375d9a8801292d5b80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <cmath> #include <vector> #include <iostream> #include <fstream> #include <stdio.h> // Block and grid size defines. // Seperate defines are really just for future convenience... #define CSV_NAME "fd_data_ph.csv" #define BLOCK_SIZE 256 #define GRID_SIZE 256 #define SEP ";" // #define DEBUG template <typename T> void printContainer(T container, const int size, const int only) { if (only){ for (int i = 0; i < only; ++i) std::cout << container[i] << " | "; std::cout << " ... "; for (int i = size - only; i < size; ++i) std::cout << container[i] << " | "; } else { for (int i = 0; i < size; ++i) std::cout << container[i] << " | "; } std::cout << std::endl; } // y = A * x __global__ void cuda_csr_matvec_product(int N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *x, double *y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { double sum = 0; for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) { sum += csr_values[k] * x[csr_colindices[k]]; } y[i] = sum; } } // x <- x + alpha * y __global__ void cuda_vecadd(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] += alpha * y[i]; } // x <- y + alpha * x __global__ void cuda_vecadd2(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] = y[i] + alpha * x[i]; } // result = (x, y) __global__ void cuda_dot_product(int N, double *x, double *y, double *result) { __shared__ double shared_mem[BLOCK_SIZE]; double dot = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { dot += x[i] * y[i]; } shared_mem[threadIdx.x] = dot; for (int k = blockDim.x / 2; k > 0; k /= 2) { __syncthreads(); if (threadIdx.x < k) { shared_mem[threadIdx.x] += shared_mem[threadIdx.x + k]; } } if (threadIdx.x == 0) atomicAdd(result, shared_mem[0]); } __global__ void part1(int N, double* x, double* r, double *p, double *Ap, double alpha, double beta) { // lines 2 , 3 + 4 for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { x[i] = x[i] + alpha * p[i]; double r_tmp = r[i] - alpha * Ap[i]; r[i] = r_tmp; //} // Merge these two? 
//for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { p[i] = r_tmp + beta * p[i]; } } __global__ void part2(int N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double* r, double *p, double *Ap, double* ApAp, double* pAp, double* rr ) { __shared__ double shared_mem_ApAp[BLOCK_SIZE]; __shared__ double shared_mem_pAp[BLOCK_SIZE]; __shared__ double shared_mem_rr[BLOCK_SIZE]; // Mat-vec product double dot_ApAp = 0., dot_pAp = 0., dot_rr = 0.; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { double sum = 0; for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) { sum += csr_values[k] * p[csr_colindices[k]]; } Ap[i] = sum; dot_ApAp += sum*sum; dot_pAp += p[i]*sum; dot_rr += r[i]*r[i]; } // now : // Ap = Ap_i --> Line 5 // we are ready for reductions shared_mem_ApAp[threadIdx.x] = dot_ApAp; shared_mem_pAp[threadIdx.x] = dot_pAp; shared_mem_rr[threadIdx.x] = dot_rr; for (int k = blockDim.x / 2; k > 0; k /= 2) { __syncthreads(); if (threadIdx.x < k) { shared_mem_ApAp[threadIdx.x] += shared_mem_ApAp[threadIdx.x + k]; shared_mem_pAp[threadIdx.x] += shared_mem_pAp[threadIdx.x + k]; shared_mem_rr[threadIdx.x] += shared_mem_rr[threadIdx.x + k]; } } if (threadIdx.x == 0) { atomicAdd(ApAp, shared_mem_ApAp[0]); atomicAdd(pAp, shared_mem_pAp[0]); atomicAdd(rr, shared_mem_rr[0]); } // now: // ApAp, pAp, rr --> Line 6 } __global__ void scan_kernel_1(int const *X, int *Y, int N, int *carries) { __shared__ int shared_buffer[BLOCK_SIZE]; int my_value; unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); unsigned int block_offset = 0; // run scan on each section for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) { // load data: my_value = (i < N) ? X[i] : 0; // inclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); if (threadIdx.x >= stride) my_value += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); // exclusive scan requires us to write a zero value at the beginning of each block my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0; // write to output array if (i < N) Y[i] = block_offset + my_value; block_offset += shared_buffer[blockDim.x-1]; } // write carry: if (threadIdx.x == 0) carries[blockIdx.x] = block_offset; } // exclusive-scan of carries __global__ void scan_kernel_2(int *carries) { __shared__ int shared_buffer[BLOCK_SIZE]; // load data: int my_carry = carries[threadIdx.x]; // exclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); if (threadIdx.x >= stride) my_carry += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); // write to output array carries[threadIdx.x] = (threadIdx.x > 0) ? 
shared_buffer[threadIdx.x - 1] : 0; } __global__ void scan_kernel_3(int *Y, int N, int const *carries) { unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); __shared__ int shared_offset; if (threadIdx.x == 0) shared_offset = carries[blockIdx.x]; __syncthreads(); // add offset to each element in the block: for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) if (i < N) Y[i] += shared_offset; } __global__ void count_nz(int* row_offsets, int N, int M) { for(int row = blockDim.x * blockIdx.x + threadIdx.x; row < N * M; row += gridDim.x * blockDim.x) { int nz_for_this_row = 1; int i = row / N; int j = row % N; if(i > 0) nz_for_this_row += 1; if(j > 0) nz_for_this_row += 1; if(i < N-1) nz_for_this_row += 1; if(j < M-1) nz_for_this_row += 1; row_offsets[row] = nz_for_this_row; } } __global__ void assembleA(double* values, int* columns, int* row_offsets, int N, int M) { for(int row = blockDim.x * blockIdx.x + threadIdx.x; row < N*M; row += gridDim.x * blockDim.x) { int i = row / N; int j = row % N; int counter = 0; if ( i > 0) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = (i-1)*N+j; counter++; } if ( j > 0) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = i*N+(j-1); counter++; } values[(int)row_offsets[row] + counter] = 4; columns[(int)row_offsets[row] + counter] = i*N+j; counter++; if ( j < M-1) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = i*N+(j+1); counter++; } if ( i < N-1) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = (i+1)*N+j; counter++; } } } void exclusive_scan(int const * input, int * output, int N) { int *carries; hipMalloc(&carries, sizeof(int) * GRID_SIZE); // First step: Scan within each thread group and write carrieshipLaunchKernelGGL(( scan_kernel_1), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, input, output, N, carries); // Second step: Compute offset for each thread group (exclusive scan for each thread group)hipLaunchKernelGGL(( scan_kernel_2), dim3(1), dim3(GRID_SIZE), 0, 0, carries); // Third step: Offset each thread group accordinglyhipLaunchKernelGGL(( scan_kernel_3), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, output, N, carries); hipFree(carries); } int conjugate_gradient(int N, // number of unknows int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *rhs, double *solution) //, double *init_guess) // feel free to add a nonzero initial guess as needed { // initialize timer Timer timer; // clear solution vector (it may contain garbage values): std::fill(solution, solution + N, 0); // initialize work vectors: double alpha, beta, pAp, ApAp, rr; double* cuda_pAp, *cuda_ApAp, *cuda_rr; double* cuda_x, *cuda_p, *cuda_r, *cuda_Ap; hipMalloc(&cuda_p, sizeof(double) * N); hipMalloc(&cuda_r, sizeof(double) * N); hipMalloc(&cuda_Ap, sizeof(double) * N); hipMalloc(&cuda_x, sizeof(double) * N); hipMalloc(&cuda_pAp, sizeof(double)); hipMalloc(&cuda_ApAp, sizeof(double)); hipMalloc(&cuda_rr, sizeof(double)); hipMemcpy(cuda_p, rhs, sizeof(double) * N, hipMemcpyHostToDevice); hipMemcpy(cuda_r, rhs, sizeof(double) * N, hipMemcpyHostToDevice); hipMemcpy(cuda_x, solution, sizeof(double) * N, hipMemcpyHostToDevice); const double zero = 0; hipMemcpy(cuda_pAp, &zero, sizeof(double), hipMemcpyHostToDevice); 
hipMemcpy(cuda_ApAp, &zero, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cuda_rr, &zero, sizeof(double), hipMemcpyHostToDevice); // Initial values: i = 0 // device hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_r, cuda_r, cuda_rr); hipLaunchKernelGGL(( cuda_csr_matvec_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, csr_rowoffsets, csr_colindices, csr_values, cuda_p, cuda_Ap); hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_p, cuda_Ap, cuda_pAp); hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_Ap, cuda_Ap, cuda_ApAp); hipMemcpy(&rr, cuda_rr, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&pAp, cuda_pAp, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&ApAp, cuda_ApAp, sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // host side of things double initial_residual_squared = rr; #ifdef DEBUG std::cout << "Initial residual norm: " << initial_residual_squared << std::endl; #endif alpha = rr / pAp; //beta = (alpha*alpha * ApAp - rr) / rr; beta = alpha * alpha * ApAp / rr - 1; int iters = 1; hipDeviceSynchronize(); timer.reset(); while (1) { hipLaunchKernelGGL(( part1), dim3(BLOCK_SIZE), dim3(GRID_SIZE), 0, 0, N, cuda_x, cuda_r, cuda_p, cuda_Ap, alpha, beta); hipMemcpy(cuda_pAp, &zero, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cuda_ApAp, &zero, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cuda_rr, &zero, sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( part2), dim3(BLOCK_SIZE), dim3(GRID_SIZE), 0, 0, N, csr_rowoffsets, csr_colindices, csr_values, cuda_r, cuda_p, cuda_Ap, cuda_ApAp, cuda_pAp, cuda_rr); hipDeviceSynchronize(); hipMemcpy(&rr, cuda_rr, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&pAp, cuda_pAp, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&ApAp, cuda_ApAp, sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // line 10: double rel_norm = std::sqrt(rr / initial_residual_squared); if (rel_norm < 1e-6) { break; } alpha = rr / pAp; //beta = (alpha*alpha * ApAp - rr) / rr; beta = alpha * alpha * ApAp / rr - 1; #ifdef DEBUG if (iters%100==0) { std::cout << "Norm after " << iters << " iterations:\n" << "rel. norm: " << rel_norm << "\n" << "abs. norm: " << std::sqrt(beta) << std::endl; } #endif if (iters > 10000) break; // solver didn't converge ++iters; } hipMemcpy(solution, cuda_x, sizeof(double) * N, hipMemcpyDeviceToHost); hipDeviceSynchronize(); #ifdef DEBUG std::cout << "Time elapsed: " << timer.get() << " (" << timer.get() / iters << " per iteration)" << std::endl; #endif if (iters > 10000) std::cout << "Conjugate Gradient did NOT converge within 10000 iterations" << std::endl; else std::cout << "Conjugate Gradient converged in " << iters << " iterations." 
<< std::endl; // Vectors hipFree(cuda_x); hipFree(cuda_p); hipFree(cuda_r); hipFree(cuda_Ap); // Scalers hipFree(cuda_pAp); hipFree(cuda_ApAp); hipFree(cuda_rr); return iters; } int main() { std::string csv_name = CSV_NAME; std::cout << "\n\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex5/" + csv_name << std::endl; std::string header = "N;M;unknowns;nz_found;times_assemble_cpu;times_assemble_gpu;times_cg;iters;norm_after"; std::fstream csv; csv.open(csv_name, std::fstream::out | std::fstream::trunc); csv << header << std::endl; csv.close(); Timer timer; std::vector<int> N_vec; for (int i = 2048; i <= 10000; i *= 2) { N_vec.push_back(i); } for (int& N: N_vec) { std::cout << "N = M = " << N << std::endl; int M = N; // // Allocate host arrays for reference // int *row_offsets = (int *)malloc(sizeof(int) * (N*M+1)); // // Allocate CUDA-arrays // int *cuda_row_offsets; int *cuda_row_offsets_2; double *cuda_values; int *cuda_columns; hipMalloc(&cuda_row_offsets, sizeof(int) * (N*M+1)); hipMalloc(&cuda_row_offsets_2, sizeof(int) * (N*M+1)); // Perform the calculations int numberOfValues; timer.reset(); hipLaunchKernelGGL(( count_nnz), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, cuda_row_offsets_2, N, M); exclusive_scan(cuda_row_offsets_2, cuda_row_offsets, N*M+1); hipMemcpy(row_offsets, cuda_row_offsets, sizeof(int) * (N*M+1), hipMemcpyDeviceToHost); numberOfValues = row_offsets[N*M]; #ifdef DEBUG printContainer(row_offsets, N*M+1, 4); std::cout << std::endl; #endif double *values = (double *)malloc(sizeof(double) * numberOfValues); int *columns = (int *)malloc(sizeof(int) * numberOfValues); hipMalloc(&cuda_columns, sizeof(int) * numberOfValues); hipMalloc(&cuda_values, sizeof(double) * numberOfValues); hipLaunchKernelGGL(( assembleA), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, cuda_values, cuda_columns, cuda_row_offsets, N, M); double time_assemble_gpu = timer.get(); hipMemcpy(values, cuda_values, sizeof(double) * numberOfValues, hipMemcpyDeviceToHost); hipMemcpy(columns, cuda_columns, sizeof(int) * numberOfValues, hipMemcpyDeviceToHost); #ifdef DEBUG printContainer(values, numberOfValues, 4); std::cout << std::endl; printContainer(columns, numberOfValues, 4); #endif /* -------- CPU -----------*/ int *csr_rowoffsets = (int *)malloc(sizeof(int) * (N*M+1)); int *csr_colindices = (int *)malloc(sizeof(int) * (N*M+1)*5); double *csr_values = (double *)malloc(sizeof(double) * (N*M+1)*5); #ifdef DEBUG std::cout << "generate CPU "<<std::endl; #endif timer.reset(); generate_fdm_laplace(N, csr_rowoffsets, csr_colindices, csr_values); double time_assemble_cpu = timer.get(); /* -------- CPU -----------*/ // // Allocate solution vector and right hand side: // double *solution = (double *)malloc(sizeof(double) * N*M); double *rhs = (double *)malloc(sizeof(double) * N*M); std::fill(rhs, rhs + N*M, 1.); timer.reset(); int iters = conjugate_gradient(N*M, cuda_row_offsets, cuda_columns, cuda_values, rhs, solution); double runtime = timer.get(); #ifdef DEBUG std::cout << "runtime: " << runtime << std::endl; #endif double residual_norm = relative_residual(N*M, row_offsets, columns, values, rhs, solution); #ifndef DEBUG csv.open (csv_name, std::fstream::out | std::fstream::app); csv << N << SEP << M << SEP << N*M << SEP << numberOfValues << SEP << time_assemble_cpu << SEP << time_assemble_gpu << SEP << runtime << SEP << iters << SEP << residual_norm << std::endl; csv.close(); #endif // // Clean up: // free(row_offsets); free(values); free(columns); free(csr_rowoffsets); free(csr_colindices); 
free(csr_values); hipFree(cuda_row_offsets); hipFree(cuda_row_offsets_2); hipFree(cuda_values); hipFree(cuda_columns); } std::cout << "\n\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex5/" + csv_name << std::endl; return EXIT_SUCCESS; }
c3188cd3f334ac6d986d1f375d9a8801292d5b80.cu
#include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <cmath> #include <vector> #include <iostream> #include <fstream> #include <stdio.h> // Block and grid size defines. // Seperate defines are really just for future convenience... #define CSV_NAME "fd_data_ph.csv" #define BLOCK_SIZE 256 #define GRID_SIZE 256 #define SEP ";" // #define DEBUG template <typename T> void printContainer(T container, const int size, const int only) { if (only){ for (int i = 0; i < only; ++i) std::cout << container[i] << " | "; std::cout << " ... "; for (int i = size - only; i < size; ++i) std::cout << container[i] << " | "; } else { for (int i = 0; i < size; ++i) std::cout << container[i] << " | "; } std::cout << std::endl; } // y = A * x __global__ void cuda_csr_matvec_product(int N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *x, double *y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { double sum = 0; for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) { sum += csr_values[k] * x[csr_colindices[k]]; } y[i] = sum; } } // x <- x + alpha * y __global__ void cuda_vecadd(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] += alpha * y[i]; } // x <- y + alpha * x __global__ void cuda_vecadd2(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] = y[i] + alpha * x[i]; } // result = (x, y) __global__ void cuda_dot_product(int N, double *x, double *y, double *result) { __shared__ double shared_mem[BLOCK_SIZE]; double dot = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { dot += x[i] * y[i]; } shared_mem[threadIdx.x] = dot; for (int k = blockDim.x / 2; k > 0; k /= 2) { __syncthreads(); if (threadIdx.x < k) { shared_mem[threadIdx.x] += shared_mem[threadIdx.x + k]; } } if (threadIdx.x == 0) atomicAdd(result, shared_mem[0]); } __global__ void part1(int N, double* x, double* r, double *p, double *Ap, double alpha, double beta) { // lines 2 , 3 + 4 for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { x[i] = x[i] + alpha * p[i]; double r_tmp = r[i] - alpha * Ap[i]; r[i] = r_tmp; //} // Merge these two? 
//for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { p[i] = r_tmp + beta * p[i]; } } __global__ void part2(int N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double* r, double *p, double *Ap, double* ApAp, double* pAp, double* rr ) { __shared__ double shared_mem_ApAp[BLOCK_SIZE]; __shared__ double shared_mem_pAp[BLOCK_SIZE]; __shared__ double shared_mem_rr[BLOCK_SIZE]; // Mat-vec product double dot_ApAp = 0., dot_pAp = 0., dot_rr = 0.; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { double sum = 0; for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) { sum += csr_values[k] * p[csr_colindices[k]]; } Ap[i] = sum; dot_ApAp += sum*sum; dot_pAp += p[i]*sum; dot_rr += r[i]*r[i]; } // now : // Ap = Ap_i --> Line 5 // we are ready for reductions shared_mem_ApAp[threadIdx.x] = dot_ApAp; shared_mem_pAp[threadIdx.x] = dot_pAp; shared_mem_rr[threadIdx.x] = dot_rr; for (int k = blockDim.x / 2; k > 0; k /= 2) { __syncthreads(); if (threadIdx.x < k) { shared_mem_ApAp[threadIdx.x] += shared_mem_ApAp[threadIdx.x + k]; shared_mem_pAp[threadIdx.x] += shared_mem_pAp[threadIdx.x + k]; shared_mem_rr[threadIdx.x] += shared_mem_rr[threadIdx.x + k]; } } if (threadIdx.x == 0) { atomicAdd(ApAp, shared_mem_ApAp[0]); atomicAdd(pAp, shared_mem_pAp[0]); atomicAdd(rr, shared_mem_rr[0]); } // now: // ApAp, pAp, rr --> Line 6 } __global__ void scan_kernel_1(int const *X, int *Y, int N, int *carries) { __shared__ int shared_buffer[BLOCK_SIZE]; int my_value; unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); unsigned int block_offset = 0; // run scan on each section for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) { // load data: my_value = (i < N) ? X[i] : 0; // inclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); if (threadIdx.x >= stride) my_value += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); // exclusive scan requires us to write a zero value at the beginning of each block my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0; // write to output array if (i < N) Y[i] = block_offset + my_value; block_offset += shared_buffer[blockDim.x-1]; } // write carry: if (threadIdx.x == 0) carries[blockIdx.x] = block_offset; } // exclusive-scan of carries __global__ void scan_kernel_2(int *carries) { __shared__ int shared_buffer[BLOCK_SIZE]; // load data: int my_carry = carries[threadIdx.x]; // exclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); if (threadIdx.x >= stride) my_carry += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); // write to output array carries[threadIdx.x] = (threadIdx.x > 0) ? 
shared_buffer[threadIdx.x - 1] : 0; } __global__ void scan_kernel_3(int *Y, int N, int const *carries) { unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); __shared__ int shared_offset; if (threadIdx.x == 0) shared_offset = carries[blockIdx.x]; __syncthreads(); // add offset to each element in the block: for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) if (i < N) Y[i] += shared_offset; } __global__ void count_nz(int* row_offsets, int N, int M) { for(int row = blockDim.x * blockIdx.x + threadIdx.x; row < N * M; row += gridDim.x * blockDim.x) { int nz_for_this_row = 1; int i = row / N; int j = row % N; if(i > 0) nz_for_this_row += 1; if(j > 0) nz_for_this_row += 1; if(i < N-1) nz_for_this_row += 1; if(j < M-1) nz_for_this_row += 1; row_offsets[row] = nz_for_this_row; } } __global__ void assembleA(double* values, int* columns, int* row_offsets, int N, int M) { for(int row = blockDim.x * blockIdx.x + threadIdx.x; row < N*M; row += gridDim.x * blockDim.x) { int i = row / N; int j = row % N; int counter = 0; if ( i > 0) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = (i-1)*N+j; counter++; } if ( j > 0) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = i*N+(j-1); counter++; } values[(int)row_offsets[row] + counter] = 4; columns[(int)row_offsets[row] + counter] = i*N+j; counter++; if ( j < M-1) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = i*N+(j+1); counter++; } if ( i < N-1) { values[(int)row_offsets[row] + counter] = -1; columns[(int)row_offsets[row] + counter] = (i+1)*N+j; counter++; } } } void exclusive_scan(int const * input, int * output, int N) { int *carries; cudaMalloc(&carries, sizeof(int) * GRID_SIZE); // First step: Scan within each thread group and write carries scan_kernel_1<<<GRID_SIZE, BLOCK_SIZE>>>(input, output, N, carries); // Second step: Compute offset for each thread group (exclusive scan for each thread group) scan_kernel_2<<<1, GRID_SIZE>>>(carries); // Third step: Offset each thread group accordingly scan_kernel_3<<<GRID_SIZE, BLOCK_SIZE>>>(output, N, carries); cudaFree(carries); } int conjugate_gradient(int N, // number of unknows int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *rhs, double *solution) //, double *init_guess) // feel free to add a nonzero initial guess as needed { // initialize timer Timer timer; // clear solution vector (it may contain garbage values): std::fill(solution, solution + N, 0); // initialize work vectors: double alpha, beta, pAp, ApAp, rr; double* cuda_pAp, *cuda_ApAp, *cuda_rr; double* cuda_x, *cuda_p, *cuda_r, *cuda_Ap; cudaMalloc(&cuda_p, sizeof(double) * N); cudaMalloc(&cuda_r, sizeof(double) * N); cudaMalloc(&cuda_Ap, sizeof(double) * N); cudaMalloc(&cuda_x, sizeof(double) * N); cudaMalloc(&cuda_pAp, sizeof(double)); cudaMalloc(&cuda_ApAp, sizeof(double)); cudaMalloc(&cuda_rr, sizeof(double)); cudaMemcpy(cuda_p, rhs, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMemcpy(cuda_r, rhs, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMemcpy(cuda_x, solution, sizeof(double) * N, cudaMemcpyHostToDevice); const double zero = 0; cudaMemcpy(cuda_pAp, &zero, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cuda_ApAp, &zero, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cuda_rr, &zero, 
sizeof(double), cudaMemcpyHostToDevice); // Initial values: i = 0 // device cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_r, cuda_r, cuda_rr); cuda_csr_matvec_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, csr_rowoffsets, csr_colindices, csr_values, cuda_p, cuda_Ap); cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_p, cuda_Ap, cuda_pAp); cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_Ap, cuda_Ap, cuda_ApAp); cudaMemcpy(&rr, cuda_rr, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&pAp, cuda_pAp, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&ApAp, cuda_ApAp, sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // host side of things double initial_residual_squared = rr; #ifdef DEBUG std::cout << "Initial residual norm: " << initial_residual_squared << std::endl; #endif alpha = rr / pAp; //beta = (alpha*alpha * ApAp - rr) / rr; beta = alpha * alpha * ApAp / rr - 1; int iters = 1; cudaDeviceSynchronize(); timer.reset(); while (1) { part1<<<BLOCK_SIZE, GRID_SIZE>>>(N, cuda_x, cuda_r, cuda_p, cuda_Ap, alpha, beta); cudaMemcpy(cuda_pAp, &zero, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cuda_ApAp, &zero, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cuda_rr, &zero, sizeof(double), cudaMemcpyHostToDevice); part2<<<BLOCK_SIZE, GRID_SIZE>>>(N, csr_rowoffsets, csr_colindices, csr_values, cuda_r, cuda_p, cuda_Ap, cuda_ApAp, cuda_pAp, cuda_rr); cudaDeviceSynchronize(); cudaMemcpy(&rr, cuda_rr, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&pAp, cuda_pAp, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&ApAp, cuda_ApAp, sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // line 10: double rel_norm = std::sqrt(rr / initial_residual_squared); if (rel_norm < 1e-6) { break; } alpha = rr / pAp; //beta = (alpha*alpha * ApAp - rr) / rr; beta = alpha * alpha * ApAp / rr - 1; #ifdef DEBUG if (iters%100==0) { std::cout << "Norm after " << iters << " iterations:\n" << "rel. norm: " << rel_norm << "\n" << "abs. norm: " << std::sqrt(beta) << std::endl; } #endif if (iters > 10000) break; // solver didn't converge ++iters; } cudaMemcpy(solution, cuda_x, sizeof(double) * N, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); #ifdef DEBUG std::cout << "Time elapsed: " << timer.get() << " (" << timer.get() / iters << " per iteration)" << std::endl; #endif if (iters > 10000) std::cout << "Conjugate Gradient did NOT converge within 10000 iterations" << std::endl; else std::cout << "Conjugate Gradient converged in " << iters << " iterations." 
<< std::endl; // Vectors cudaFree(cuda_x); cudaFree(cuda_p); cudaFree(cuda_r); cudaFree(cuda_Ap); // Scalers cudaFree(cuda_pAp); cudaFree(cuda_ApAp); cudaFree(cuda_rr); return iters; } int main() { std::string csv_name = CSV_NAME; std::cout << "\n\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex5/" + csv_name << std::endl; std::string header = "N;M;unknowns;nz_found;times_assemble_cpu;times_assemble_gpu;times_cg;iters;norm_after"; std::fstream csv; csv.open(csv_name, std::fstream::out | std::fstream::trunc); csv << header << std::endl; csv.close(); Timer timer; std::vector<int> N_vec; for (int i = 2048; i <= 10000; i *= 2) { N_vec.push_back(i); } for (int& N: N_vec) { std::cout << "N = M = " << N << std::endl; int M = N; // // Allocate host arrays for reference // int *row_offsets = (int *)malloc(sizeof(int) * (N*M+1)); // // Allocate CUDA-arrays // int *cuda_row_offsets; int *cuda_row_offsets_2; double *cuda_values; int *cuda_columns; cudaMalloc(&cuda_row_offsets, sizeof(int) * (N*M+1)); cudaMalloc(&cuda_row_offsets_2, sizeof(int) * (N*M+1)); // Perform the calculations int numberOfValues; timer.reset(); count_nnz<<<GRID_SIZE, BLOCK_SIZE>>>(cuda_row_offsets_2, N, M); exclusive_scan(cuda_row_offsets_2, cuda_row_offsets, N*M+1); cudaMemcpy(row_offsets, cuda_row_offsets, sizeof(int) * (N*M+1), cudaMemcpyDeviceToHost); numberOfValues = row_offsets[N*M]; #ifdef DEBUG printContainer(row_offsets, N*M+1, 4); std::cout << std::endl; #endif double *values = (double *)malloc(sizeof(double) * numberOfValues); int *columns = (int *)malloc(sizeof(int) * numberOfValues); cudaMalloc(&cuda_columns, sizeof(int) * numberOfValues); cudaMalloc(&cuda_values, sizeof(double) * numberOfValues); assembleA<<<GRID_SIZE, BLOCK_SIZE>>>(cuda_values, cuda_columns, cuda_row_offsets, N, M); double time_assemble_gpu = timer.get(); cudaMemcpy(values, cuda_values, sizeof(double) * numberOfValues, cudaMemcpyDeviceToHost); cudaMemcpy(columns, cuda_columns, sizeof(int) * numberOfValues, cudaMemcpyDeviceToHost); #ifdef DEBUG printContainer(values, numberOfValues, 4); std::cout << std::endl; printContainer(columns, numberOfValues, 4); #endif /* -------- CPU -----------*/ int *csr_rowoffsets = (int *)malloc(sizeof(int) * (N*M+1)); int *csr_colindices = (int *)malloc(sizeof(int) * (N*M+1)*5); double *csr_values = (double *)malloc(sizeof(double) * (N*M+1)*5); #ifdef DEBUG std::cout << "generate CPU "<<std::endl; #endif timer.reset(); generate_fdm_laplace(N, csr_rowoffsets, csr_colindices, csr_values); double time_assemble_cpu = timer.get(); /* -------- CPU -----------*/ // // Allocate solution vector and right hand side: // double *solution = (double *)malloc(sizeof(double) * N*M); double *rhs = (double *)malloc(sizeof(double) * N*M); std::fill(rhs, rhs + N*M, 1.); timer.reset(); int iters = conjugate_gradient(N*M, cuda_row_offsets, cuda_columns, cuda_values, rhs, solution); double runtime = timer.get(); #ifdef DEBUG std::cout << "runtime: " << runtime << std::endl; #endif double residual_norm = relative_residual(N*M, row_offsets, columns, values, rhs, solution); #ifndef DEBUG csv.open (csv_name, std::fstream::out | std::fstream::app); csv << N << SEP << M << SEP << N*M << SEP << numberOfValues << SEP << time_assemble_cpu << SEP << time_assemble_gpu << SEP << runtime << SEP << iters << SEP << residual_norm << std::endl; csv.close(); #endif // // Clean up: // free(row_offsets); free(values); free(columns); free(csr_rowoffsets); free(csr_colindices); free(csr_values); cudaFree(cuda_row_offsets); 
cudaFree(cuda_row_offsets_2); cudaFree(cuda_values); cudaFree(cuda_columns); } std::cout << "\n\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex5/" + csv_name << std::endl; return EXIT_SUCCESS; }
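// Editor's note: illustrative host-side sketch added by the editor, not a file from the corpus.
// It spells out the scalar identity behind the merged kernels in the pair above: part2 returns
// <Ap,Ap>, <p,Ap> and <r,r> in one pass, and because <r,Ap> = <p,Ap> for conjugate directions,
//   ||r_new||^2 = ||r||^2 - 2*alpha*<p,Ap> + alpha^2*<Ap,Ap> = alpha^2*<Ap,Ap> - ||r||^2,
// which is exactly why the solver computes beta = alpha*alpha*ApAp/rr - 1 without a second
// reduction over the updated residual. The struct and function names below are invented.
struct CgScalars { double alpha, beta, rr_new; };

inline CgScalars cg_scalar_update(double rr, double pAp, double ApAp) {
  CgScalars s;
  s.alpha  = rr / pAp;                         // step length
  s.rr_new = s.alpha * s.alpha * ApAp - rr;    // ||r_new||^2 from already-available reductions
  s.beta   = s.rr_new / rr;                    // same value as alpha*alpha*ApAp/rr - 1
  return s;
}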
96bb668fecc8b0a34b7650aef53cc31e3a01ee28.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ge_fmin.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int fd = 1; const REAL *a = NULL; hipMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; const REAL *b = NULL; hipMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; REAL *c = NULL; hipMalloc(&c, XSIZE*YSIZE); const int offset_c = 1; const int ld_c = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ge_fmin), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ge_fmin), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ge_fmin), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
96bb668fecc8b0a34b7650aef53cc31e3a01ee28.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ge_fmin.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int fd = 1; const REAL *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; const REAL *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; REAL *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); const int offset_c = 1; const int ld_c = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ge_fmin<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ge_fmin<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ge_fmin<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
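// Editor's note: illustrative snippet added by the editor, not a file from the corpus. It shows,
// on a trivial kernel, the launch rewrite that every .cu/.hip pair above demonstrates: the CUDA
// triple-chevron launch becomes hipLaunchKernelGGL with explicit dim3 grid/block plus shared-memory
// and stream arguments (0, 0 by default). hipify-perl additionally wraps the kernel name in an
// extra parenthesis pair, which is why the .hip files read "hipLaunchKernelGGL(( name), ...)".
// The kernel below (axpy) is invented for the illustration.
__global__ void axpy(int n, float a, const float *x, float *y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] += a * x[i];
}
// CUDA spelling:
//   axpy<<<(n + 255) / 256, 256>>>(n, a, d_x, d_y);
// HIP spelling after hipify:
//   hipLaunchKernelGGL(axpy, dim3((n + 255) / 256), dim3(256), 0, 0, n, a, d_x, d_y);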
9dc24335940e31d15412f59d6fc8dc41080fd2fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green, and Blue is in it. //The 'A' stands for Alpha and is used for transparency; it will be //ignored in this homework. //Each channel Red, Blue, Green, and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "reference_calc.cpp" #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset unsigned int r = (blockIdx.y * blockDim.y) + threadIdx.y; unsigned int c = (blockIdx.x * blockDim.x) + threadIdx.x; uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 threadsPerBlock(16, 16); const dim3 pad((numCols-threadsPerBlock.x)/2, (numRows-threadsPerBlock.y)/2); const dim3 numBlocks((numCols+2*pad.x)/threadsPerBlock.x, /* for instance 512/8 = 64*/ (numRows+2*pad.x)/threadsPerBlock.y); hipLaunchKernelGGL(( rgba_to_greyscale), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
9dc24335940e31d15412f59d6fc8dc41080fd2fa.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green, and Blue is in it. //The 'A' stands for Alpha and is used for transparency; it will be //ignored in this homework. //Each channel Red, Blue, Green, and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "reference_calc.cpp" #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset unsigned int r = (blockIdx.y * blockDim.y) + threadIdx.y; unsigned int c = (blockIdx.x * blockDim.x) + threadIdx.x; uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 threadsPerBlock(16, 16); const dim3 pad((numCols-threadsPerBlock.x)/2, (numRows-threadsPerBlock.y)/2); const dim3 numBlocks((numCols+2*pad.x)/threadsPerBlock.x, /* for instance 512/8 = 64*/ (numRows+2*pad.x)/threadsPerBlock.y); rgba_to_greyscale<<<numBlocks, threadsPerBlock>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
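// Editor's note: a tiny host-side check added by the editor (not a file from the corpus) for the
// NTSC greyscale weighting explained in the homework comments above; the alpha channel is ignored.
// ntsc_grey is an invented helper name.
#include <cstdio>

unsigned char ntsc_grey(unsigned char r, unsigned char g, unsigned char b) {
  return (unsigned char)(.299f * r + .587f * g + .114f * b);
}

int main() {
  // one worked pixel: R=200, G=100, B=50 -> .299*200 + .587*100 + .114*50 = 59.8 + 58.7 + 5.7 = 124.2
  printf("%d\n", ntsc_grey(200, 100, 50));   // prints 124 after truncation to a byte
  return 0;
}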
fdde141701ec6aef7c1c0271fc242e3b55fa093c.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/OpMathType.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <limits> namespace at::native { CONSTEXPR_EXCEPT_WIN_CUDA char sinh_name[] = "sinh"; void sinh_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { #if AT_USE_JITERATOR static const auto sinh_string = jiterator_stringify( template <typename T> T sinh(T a) { return std::sinh(a); }); AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "sinh_name", [&]() { jitted_gpu_kernel< /*name=*/sinh_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, sinh_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "sinh_name", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return ::sinh(static_cast<opmath_t>(a)); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, common_dtype, "sinh_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sinh(a); }); }); } } REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda); } // namespace at::native
fdde141701ec6aef7c1c0271fc242e3b55fa093c.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/OpMathType.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <limits> namespace at::native { CONSTEXPR_EXCEPT_WIN_CUDA char sinh_name[] = "sinh"; void sinh_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { #if AT_USE_JITERATOR static const auto sinh_string = jiterator_stringify( template <typename T> T sinh(T a) { return std::sinh(a); }); AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "sinh_name", [&]() { jitted_gpu_kernel< /*name=*/sinh_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, sinh_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "sinh_name", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return ::sinh(static_cast<opmath_t>(a)); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, common_dtype, "sinh_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sinh(a); }); }); } } REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda); } // namespace at::native
0aabe3737781088e5bd99eb7585f5670858e5ccb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "activations.h" #include "hip/hip_runtime.h" } __device__ float lhtan_activate_kernel(float x) { if(x < 0) return .001f*x; if(x > 1) return .001f*(x-1.f) + 1.f; return x; } __device__ float lhtan_gradient_kernel(float x) { if(x > 0 && x < 1) return 1; return .001; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x){return x;} __device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));} __device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);} __device__ float selu_activate_kernel(float x){return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;} __device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01f * (x + 4); if(x > 4) return .01f * (x - 4) + 1; return .125f*x + .5f; } __device__ float stair_activate_kernel(float x) { int n = floorf(x); if (n%2 == 0) return floorf(x/2); else return (x - n) + floorf(x/2); } __device__ float hardtan_gradient_kernel(float x) { if (x > -1 && x < 1) return 1; return 0; } __device__ float linear_gradient_kernel(float x){return 1;} __device__ float logistic_gradient_kernel(float x){return (1-x)*x;} __device__ float loggy_gradient_kernel(float x) { float y = (x+1)/2; return 2*(1-y)*y; } __device__ float relu_gradient_kernel(float x){return (x>0);} __device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);} __device__ float selu_gradient_kernel(float x){return (x >= 0)*1.0507 + (x < 0)*(x + 1.0507*1.6732);} __device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;} __device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;} __device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;} __device__ float tanh_gradient_kernel(float x){return 1-x*x;} __device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? 
.01f : .125f;} __device__ float stair_gradient_kernel(float x) { if (floorf(x) == x) return 0; return 1; } __device__ float activate_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case SELU: return selu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __device__ float gradient_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient_kernel(x); case LOGISTIC: return logistic_gradient_kernel(x); case LOGGY: return loggy_gradient_kernel(x); case RELU: return relu_gradient_kernel(x); case ELU: return elu_gradient_kernel(x); case SELU: return selu_gradient_kernel(x); case RELIE: return relie_gradient_kernel(x); case RAMP: return ramp_gradient_kernel(x); case LEAKY: return leaky_gradient_kernel(x); case TANH: return tanh_gradient_kernel(x); case PLSE: return plse_gradient_kernel(x); case STAIR: return stair_gradient_kernel(x); case HARDTAN: return hardtan_gradient_kernel(x); case LHTAN: return lhtan_gradient_kernel(x); } return 0; } __global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) { float de = dy[id]; dx[b*s + i] = x2*de; dx[b*s + s/2 + i] = x1*de; } } extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y) { hipLaunchKernelGGL(( binary_gradient_array_kernel), dim3(cuda_gridsize(n/2)), dim3(BLOCK), 0, 0, x, dx, n/2, size, a, y); check_error(hipPeekAtLastError()); } __global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) y[id] = x1*x2; } extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y) { hipLaunchKernelGGL(( binary_activate_array_kernel), dim3(cuda_gridsize(n/2)), dim3(BLOCK), 0, 0, x, n/2, size, a, y); check_error(hipPeekAtLastError()); } __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) x[i] = activate_kernel(x[i], a); } __global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) delta[i] *= gradient_kernel(x[i], a); } extern "C" void activate_array_gpu(float *x, int n, ACTIVATION a) { hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a); check_error(hipPeekAtLastError()); } extern "C" void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta) { hipLaunchKernelGGL(( gradient_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a, delta); check_error(hipPeekAtLastError()); }
0aabe3737781088e5bd99eb7585f5670858e5ccb.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "activations.h" #include "cuda.h" } __device__ float lhtan_activate_kernel(float x) { if(x < 0) return .001f*x; if(x > 1) return .001f*(x-1.f) + 1.f; return x; } __device__ float lhtan_gradient_kernel(float x) { if(x > 0 && x < 1) return 1; return .001; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x){return x;} __device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));} __device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);} __device__ float selu_activate_kernel(float x){return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;} __device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01f * (x + 4); if(x > 4) return .01f * (x - 4) + 1; return .125f*x + .5f; } __device__ float stair_activate_kernel(float x) { int n = floorf(x); if (n%2 == 0) return floorf(x/2); else return (x - n) + floorf(x/2); } __device__ float hardtan_gradient_kernel(float x) { if (x > -1 && x < 1) return 1; return 0; } __device__ float linear_gradient_kernel(float x){return 1;} __device__ float logistic_gradient_kernel(float x){return (1-x)*x;} __device__ float loggy_gradient_kernel(float x) { float y = (x+1)/2; return 2*(1-y)*y; } __device__ float relu_gradient_kernel(float x){return (x>0);} __device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);} __device__ float selu_gradient_kernel(float x){return (x >= 0)*1.0507 + (x < 0)*(x + 1.0507*1.6732);} __device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;} __device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;} __device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;} __device__ float tanh_gradient_kernel(float x){return 1-x*x;} __device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? 
.01f : .125f;} __device__ float stair_gradient_kernel(float x) { if (floorf(x) == x) return 0; return 1; } __device__ float activate_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case SELU: return selu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __device__ float gradient_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient_kernel(x); case LOGISTIC: return logistic_gradient_kernel(x); case LOGGY: return loggy_gradient_kernel(x); case RELU: return relu_gradient_kernel(x); case ELU: return elu_gradient_kernel(x); case SELU: return selu_gradient_kernel(x); case RELIE: return relie_gradient_kernel(x); case RAMP: return ramp_gradient_kernel(x); case LEAKY: return leaky_gradient_kernel(x); case TANH: return tanh_gradient_kernel(x); case PLSE: return plse_gradient_kernel(x); case STAIR: return stair_gradient_kernel(x); case HARDTAN: return hardtan_gradient_kernel(x); case LHTAN: return lhtan_gradient_kernel(x); } return 0; } __global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) { float de = dy[id]; dx[b*s + i] = x2*de; dx[b*s + s/2 + i] = x1*de; } } extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y) { binary_gradient_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, dx, n/2, size, a, y); check_error(cudaPeekAtLastError()); } __global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) y[id] = x1*x2; } extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y) { binary_activate_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, n/2, size, a, y); check_error(cudaPeekAtLastError()); } __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) x[i] = activate_kernel(x[i], a); } __global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) delta[i] *= gradient_kernel(x[i], a); } extern "C" void activate_array_gpu(float *x, int n, ACTIVATION a) { activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a); check_error(cudaPeekAtLastError()); } extern "C" void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta) { gradient_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a, delta); check_error(cudaPeekAtLastError()); }
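// Editor's note: small host-side check added by the editor, not a file from the corpus. The
// gradient kernels above are written in terms of the activated value, e.g.
// logistic_gradient_kernel(x) = (1-x)*x, which matches the identity sigmoid'(z) = y*(1-y) with
// y = sigmoid(z). The snippet just verifies that identity numerically; all names are invented.
#include <cmath>
#include <cstdio>

int main() {
  float z = 0.5f;
  float y = 1.f / (1.f + expf(-z));            // forward (logistic) activation
  float grad_from_y = (1.f - y) * y;           // what logistic_gradient_kernel computes from y
  float h = 1e-3f;
  float grad_numeric = (1.f / (1.f + expf(-(z + h))) - 1.f / (1.f + expf(-(z - h)))) / (2.f * h);
  printf("analytic %f  numeric %f\n", grad_from_y, grad_numeric);
  return 0;
}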
cea0c13cd26ca3f60b0282fd86998ff9879d87fd.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2018-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <hash/concurrent_unordered_map.cuh>

#include <cudf/types.hpp>
#include <cudf_test/base_fixture.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_vector.hpp>

#include <thrust/logical.h>

#include <gtest/gtest.h>

#include <cstdlib>
#include <iostream>
#include <limits>
#include <random>
#include <unordered_map>
#include <vector>

template <typename K, typename V>
struct key_value_types {
  using key_type   = K;
  using value_type = V;
  using pair_type  = thrust::pair<K, V>;
  using map_type   = concurrent_unordered_map<key_type, value_type>;
};

template <typename T>
struct InsertTest : public cudf::test::BaseFixture {
  using key_type   = typename T::key_type;
  using value_type = typename T::value_type;
  using pair_type  = typename T::pair_type;
  using map_type   = typename T::map_type;

  InsertTest()
  {
    // prevent overflow of small types
    const size_t input_size =
      std::min(static_cast<key_type>(size), std::numeric_limits<key_type>::max());
    pairs.resize(input_size);
    map = std::move(map_type::create(compute_hash_table_size(size)));
    rmm::cuda_stream_default.synchronize();
  }

  const cudf::size_type size{10000};
  rmm::device_vector<pair_type> pairs;
  std::unique_ptr<map_type, std::function<void(map_type*)>> map;
};

using TestTypes = ::testing::Types<key_value_types<int32_t, int32_t>,
                                   key_value_types<int64_t, int64_t>,
                                   key_value_types<int8_t, int8_t>,
                                   key_value_types<int16_t, int16_t>,
                                   key_value_types<int8_t, float>,
                                   key_value_types<int16_t, double>,
                                   key_value_types<int32_t, float>,
                                   key_value_types<int64_t, double>>;

TYPED_TEST_CASE(InsertTest, TestTypes);

template <typename map_type, typename pair_type>
struct insert_pair {
  insert_pair(map_type _map) : map{_map} {}

  __device__ bool operator()(pair_type const& pair)
  {
    auto result = map.insert(pair);
    if (result.first == map.end()) { return false; }
    return result.second;
  }

  map_type map;
};

template <typename map_type, typename pair_type>
struct find_pair {
  find_pair(map_type _map) : map{_map} {}

  __device__ bool operator()(pair_type const& pair)
  {
    auto result = map.find(pair.first);
    if (result == map.end()) { return false; }
    return *result == pair;
  }

  map_type map;
};

template <typename pair_type,
          typename key_type   = typename pair_type::first_type,
          typename value_type = typename pair_type::second_type>
struct unique_pair_generator {
  __device__ pair_type operator()(cudf::size_type i)
  {
    return thrust::make_pair(key_type(i), value_type(i));
  }
};

template <typename pair_type,
          typename key_type   = typename pair_type::first_type,
          typename value_type = typename pair_type::second_type>
struct identical_pair_generator {
  identical_pair_generator(key_type k = 42, value_type v = 42) : key{k}, value{v} {}
  __device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key, value); }
  key_type key;
  value_type value;
};

template <typename pair_type,
          typename key_type   = typename pair_type::first_type,
          typename value_type = typename pair_type::second_type>
struct identical_key_generator {
  identical_key_generator(key_type k = 42) : key{k} {}
  __device__ pair_type operator()(cudf::size_type i)
  {
    return thrust::make_pair(key, value_type(i));
  }
  key_type key;
};

TYPED_TEST(InsertTest, UniqueKeysUniqueValues)
{
  using map_type  = typename TypeParam::map_type;
  using pair_type = typename TypeParam::pair_type;
  thrust::tabulate(this->pairs.begin(), this->pairs.end(), unique_pair_generator<pair_type>{});
  // All pairs should be new inserts
  EXPECT_TRUE(thrust::all_of(
    this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));

  // All pairs should be present in the map
  EXPECT_TRUE(thrust::all_of(
    this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}

TYPED_TEST(InsertTest, IdenticalKeysIdenticalValues)
{
  using map_type  = typename TypeParam::map_type;
  using pair_type = typename TypeParam::pair_type;
  thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_pair_generator<pair_type>{});
  // Insert a single pair
  EXPECT_TRUE(thrust::all_of(
    this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map}));
  // Identical inserts should all return false (no new insert)
  EXPECT_FALSE(thrust::all_of(
    this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));

  // All pairs should be present in the map
  EXPECT_TRUE(thrust::all_of(
    this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}

TYPED_TEST(InsertTest, IdenticalKeysUniqueValues)
{
  using map_type  = typename TypeParam::map_type;
  using pair_type = typename TypeParam::pair_type;
  thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_key_generator<pair_type>{});

  // Insert a single pair
  EXPECT_TRUE(thrust::all_of(
    this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map}));

  // Identical key inserts should all return false (no new insert)
  EXPECT_FALSE(thrust::all_of(
    this->pairs.begin() + 1, this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));

  // Only first pair is present in map
  EXPECT_TRUE(thrust::all_of(
    this->pairs.begin(), this->pairs.begin() + 1, find_pair<map_type, pair_type>{*this->map}));

  EXPECT_FALSE(thrust::all_of(
    this->pairs.begin() + 1, this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}

CUDF_TEST_PROGRAM_MAIN()
cea0c13cd26ca3f60b0282fd86998ff9879d87fd.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hash/concurrent_unordered_map.cuh> #include <cudf/types.hpp> #include <cudf_test/base_fixture.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_vector.hpp> #include <thrust/logical.h> #include <gtest/gtest.h> #include <cstdlib> #include <iostream> #include <limits> #include <random> #include <unordered_map> #include <vector> template <typename K, typename V> struct key_value_types { using key_type = K; using value_type = V; using pair_type = thrust::pair<K, V>; using map_type = concurrent_unordered_map<key_type, value_type>; }; template <typename T> struct InsertTest : public cudf::test::BaseFixture { using key_type = typename T::key_type; using value_type = typename T::value_type; using pair_type = typename T::pair_type; using map_type = typename T::map_type; InsertTest() { // prevent overflow of small types const size_t input_size = std::min(static_cast<key_type>(size), std::numeric_limits<key_type>::max()); pairs.resize(input_size); map = std::move(map_type::create(compute_hash_table_size(size))); rmm::cuda_stream_default.synchronize(); } const cudf::size_type size{10000}; rmm::device_vector<pair_type> pairs; std::unique_ptr<map_type, std::function<void(map_type*)>> map; }; using TestTypes = ::testing::Types<key_value_types<int32_t, int32_t>, key_value_types<int64_t, int64_t>, key_value_types<int8_t, int8_t>, key_value_types<int16_t, int16_t>, key_value_types<int8_t, float>, key_value_types<int16_t, double>, key_value_types<int32_t, float>, key_value_types<int64_t, double>>; TYPED_TEST_CASE(InsertTest, TestTypes); template <typename map_type, typename pair_type> struct insert_pair { insert_pair(map_type _map) : map{_map} {} __device__ bool operator()(pair_type const& pair) { auto result = map.insert(pair); if (result.first == map.end()) { return false; } return result.second; } map_type map; }; template <typename map_type, typename pair_type> struct find_pair { find_pair(map_type _map) : map{_map} {} __device__ bool operator()(pair_type const& pair) { auto result = map.find(pair.first); if (result == map.end()) { return false; } return *result == pair; } map_type map; }; template <typename pair_type, typename key_type = typename pair_type::first_type, typename value_type = typename pair_type::second_type> struct unique_pair_generator { __device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key_type(i), value_type(i)); } }; template <typename pair_type, typename key_type = typename pair_type::first_type, typename value_type = typename pair_type::second_type> struct identical_pair_generator { identical_pair_generator(key_type k = 42, value_type v = 42) : key{k}, value{v} {} __device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key, value); } key_type key; value_type value; }; template <typename pair_type, typename key_type = typename pair_type::first_type, typename value_type = typename pair_type::second_type> struct 
identical_key_generator { identical_key_generator(key_type k = 42) : key{k} {} __device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key, value_type(i)); } key_type key; }; TYPED_TEST(InsertTest, UniqueKeysUniqueValues) { using map_type = typename TypeParam::map_type; using pair_type = typename TypeParam::pair_type; thrust::tabulate(this->pairs.begin(), this->pairs.end(), unique_pair_generator<pair_type>{}); // All pairs should be new inserts EXPECT_TRUE(thrust::all_of( this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map})); // All pairs should be present in the map EXPECT_TRUE(thrust::all_of( this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map})); } TYPED_TEST(InsertTest, IdenticalKeysIdenticalValues) { using map_type = typename TypeParam::map_type; using pair_type = typename TypeParam::pair_type; thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_pair_generator<pair_type>{}); // Insert a single pair EXPECT_TRUE(thrust::all_of( this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map})); // Identical inserts should all return false (no new insert) EXPECT_FALSE(thrust::all_of( this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map})); // All pairs should be present in the map EXPECT_TRUE(thrust::all_of( this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map})); } TYPED_TEST(InsertTest, IdenticalKeysUniqueValues) { using map_type = typename TypeParam::map_type; using pair_type = typename TypeParam::pair_type; thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_key_generator<pair_type>{}); // Insert a single pair EXPECT_TRUE(thrust::all_of( this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map})); // Identical key inserts should all return false (no new insert) EXPECT_FALSE(thrust::all_of( this->pairs.begin() + 1, this->pairs.end(), insert_pair<map_type, pair_type>{*this->map})); // Only first pair is present in map EXPECT_TRUE(thrust::all_of( this->pairs.begin(), this->pairs.begin() + 1, find_pair<map_type, pair_type>{*this->map})); EXPECT_FALSE(thrust::all_of( this->pairs.begin() + 1, this->pairs.end(), find_pair<map_type, pair_type>{*this->map})); } CUDF_TEST_PROGRAM_MAIN()
e7cacdf5b5cf51aec7f2311495798bd46feed089.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mg_louvain_helper.hpp" #include <cugraph/graph.hpp> #include <cugraph/utilities/device_comm.cuh> #include <cugraph/utilities/error.hpp> #include <cugraph/utilities/host_scalar_comm.cuh> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/reduce.h> #include <thrust/transform.h> namespace cugraph { namespace test { template <typename T> void single_gpu_renumber_edgelist_given_number_map(raft::handle_t const& handle, rmm::device_uvector<T>& edgelist_rows_v, rmm::device_uvector<T>& edgelist_cols_v, rmm::device_uvector<T>& renumber_map_gathered_v) { rmm::device_uvector<T> index_v(renumber_map_gathered_v.size(), handle.get_stream()); auto execution_policy = handle.get_thrust_policy(); thrust::for_each( execution_policy, thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(renumber_map_gathered_v.size()), [d_renumber_map_gathered = renumber_map_gathered_v.data(), d_index = index_v.data()] __device__( auto idx) { d_index[d_renumber_map_gathered[idx]] = idx; }); thrust::transform(execution_policy, edgelist_rows_v.begin(), edgelist_rows_v.end(), edgelist_rows_v.begin(), [d_index = index_v.data()] __device__(auto v) { return d_index[v]; }); thrust::transform(execution_policy, edgelist_cols_v.begin(), edgelist_cols_v.end(), edgelist_cols_v.begin(), [d_index = index_v.data()] __device__(auto v) { return d_index[v]; }); } template <typename vertex_t, typename edge_t, typename weight_t> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> compressed_sparse_to_edgelist(edge_t const* compressed_sparse_offsets, vertex_t const* compressed_sparse_indices, std::optional<weight_t const*> compressed_sparse_weights, vertex_t major_first, vertex_t major_last, hipStream_t stream) { edge_t number_of_edges{0}; raft::update_host( &number_of_edges, compressed_sparse_offsets + (major_last - major_first), 1, stream); CUDA_TRY(hipStreamSynchronize(stream)); rmm::device_uvector<vertex_t> edgelist_major_vertices(number_of_edges, stream); rmm::device_uvector<vertex_t> edgelist_minor_vertices(number_of_edges, stream); auto edgelist_weights = compressed_sparse_weights ? 
std::make_optional<rmm::device_uvector<weight_t>>(number_of_edges, stream) : std::nullopt; // FIXME: this is highly inefficient for very high-degree vertices, for better performance, we can // fill high-degree vertices using one CUDA block per vertex, mid-degree vertices using one CUDA // warp per vertex, and low-degree vertices using one CUDA thread per block thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(major_first), thrust::make_counting_iterator(major_last), [compressed_sparse_offsets, major_first, p_majors = edgelist_major_vertices.begin()] __device__(auto v) { auto first = compressed_sparse_offsets[v - major_first]; auto last = compressed_sparse_offsets[v - major_first + 1]; thrust::fill(thrust::seq, p_majors + first, p_majors + last, v); }); thrust::copy(rmm::exec_policy(stream), compressed_sparse_indices, compressed_sparse_indices + number_of_edges, edgelist_minor_vertices.begin()); if (compressed_sparse_weights) { thrust::copy(rmm::exec_policy(stream), (*compressed_sparse_weights), (*compressed_sparse_weights) + number_of_edges, (*edgelist_weights).data()); } return std::make_tuple(std::move(edgelist_major_vertices), std::move(edgelist_minor_vertices), std::move(edgelist_weights)); } template <typename vertex_t, typename weight_t> void sort_and_coarsen_edgelist( rmm::device_uvector<vertex_t>& edgelist_major_vertices /* [INOUT] */, rmm::device_uvector<vertex_t>& edgelist_minor_vertices /* [INOUT] */, std::optional<rmm::device_uvector<weight_t>>& edgelist_weights /* [INOUT] */, hipStream_t stream) { auto pair_first = thrust::make_zip_iterator( thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin())); size_t number_of_edges{0}; if (edgelist_weights) { thrust::sort_by_key(rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size(), (*edgelist_weights).begin()); rmm::device_uvector<vertex_t> tmp_edgelist_major_vertices(edgelist_major_vertices.size(), stream); rmm::device_uvector<vertex_t> tmp_edgelist_minor_vertices(tmp_edgelist_major_vertices.size(), stream); rmm::device_uvector<weight_t> tmp_edgelist_weights(tmp_edgelist_major_vertices.size(), stream); auto it = thrust::reduce_by_key( rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size(), (*edgelist_weights).begin(), thrust::make_zip_iterator(thrust::make_tuple(tmp_edgelist_major_vertices.begin(), tmp_edgelist_minor_vertices.begin())), tmp_edgelist_weights.begin()); number_of_edges = thrust::distance(tmp_edgelist_weights.begin(), thrust::get<1>(it)); edgelist_major_vertices = std::move(tmp_edgelist_major_vertices); edgelist_minor_vertices = std::move(tmp_edgelist_minor_vertices); (*edgelist_weights) = std::move(tmp_edgelist_weights); } else { thrust::sort(rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size()); auto it = thrust::unique( rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size()); number_of_edges = thrust::distance(pair_first, it); } edgelist_major_vertices.resize(number_of_edges, stream); edgelist_minor_vertices.resize(number_of_edges, stream); edgelist_major_vertices.shrink_to_fit(stream); edgelist_minor_vertices.shrink_to_fit(stream); if (edgelist_weights) { (*edgelist_weights).resize(number_of_edges, stream); (*edgelist_weights).shrink_to_fit(stream); } } template <typename vertex_t, typename edge_t, typename weight_t> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> 
compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist( edge_t const* compressed_sparse_offsets, vertex_t const* compressed_sparse_indices, std::optional<weight_t const*> compressed_sparse_weights, vertex_t const* p_major_labels, vertex_t const* p_minor_labels, vertex_t major_first, vertex_t major_last, vertex_t minor_first, vertex_t minor_last, hipStream_t stream) { // FIXME: it might be possible to directly create relabled & coarsened edgelist from the // compressed sparse format to save memory auto [edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights] = compressed_sparse_to_edgelist(compressed_sparse_offsets, compressed_sparse_indices, compressed_sparse_weights, major_first, major_last, stream); auto pair_first = thrust::make_zip_iterator( thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin())); thrust::transform( rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size(), pair_first, [p_major_labels, p_minor_labels, major_first, minor_first] __device__(auto val) { return thrust::make_tuple(p_major_labels[thrust::get<0>(val) - major_first], p_minor_labels[thrust::get<1>(val) - minor_first]); }); sort_and_coarsen_edgelist( edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights, stream); return std::make_tuple(std::move(edgelist_major_vertices), std::move(edgelist_minor_vertices), std::move(edgelist_weights)); } // FIXME: better add "bool renumber" (which must be false in MG) to the coarsen_grpah function // instead of replicating the code here. single-GPU version template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed> std::unique_ptr<cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>> coarsen_graph( raft::handle_t const& handle, cugraph::graph_view_t<vertex_t, edge_t, weight_t, store_transposed, false> const& graph_view, vertex_t const* labels) { auto [coarsened_edgelist_major_vertices, coarsened_edgelist_minor_vertices, coarsened_edgelist_weights] = compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist( graph_view.get_matrix_partition_view().get_offsets(), graph_view.get_matrix_partition_view().get_indices(), graph_view.get_matrix_partition_view().get_weights(), labels, labels, vertex_t{0}, graph_view.get_number_of_vertices(), vertex_t{0}, graph_view.get_number_of_vertices(), handle.get_stream()); cugraph::edgelist_t<vertex_t, edge_t, weight_t> edgelist{}; edgelist.p_src_vertices = store_transposed ? coarsened_edgelist_minor_vertices.data() : coarsened_edgelist_major_vertices.data(); edgelist.p_dst_vertices = store_transposed ? coarsened_edgelist_major_vertices.data() : coarsened_edgelist_minor_vertices.data(); edgelist.p_edge_weights = coarsened_edgelist_weights ? 
std::optional<weight_t const*>{(*coarsened_edgelist_weights).data()} : std::nullopt; edgelist.number_of_edges = static_cast<edge_t>(coarsened_edgelist_major_vertices.size()); vertex_t new_number_of_vertices = 1 + thrust::reduce(handle.get_thrust_policy(), labels, labels + graph_view.get_number_of_vertices(), vertex_t{0}, thrust::maximum<vertex_t>()); return std::make_unique<cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>>( handle, edgelist, cugraph::graph_meta_t<vertex_t, edge_t, false>{ new_number_of_vertices, cugraph::graph_properties_t{graph_view.is_symmetric(), false}, std::nullopt}); } // explicit instantiation template void single_gpu_renumber_edgelist_given_number_map( raft::handle_t const& handle, rmm::device_uvector<int>& d_edgelist_rows, rmm::device_uvector<int>& d_edgelist_cols, rmm::device_uvector<int>& d_renumber_map_gathered_v); template std::unique_ptr<cugraph::graph_t<int32_t, int32_t, float, false, false>> coarsen_graph( raft::handle_t const& handle, cugraph::graph_view_t<int32_t, int32_t, float, false, false> const& graph_view, int32_t const* labels); } // namespace test } // namespace cugraph
e7cacdf5b5cf51aec7f2311495798bd46feed089.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mg_louvain_helper.hpp" #include <cugraph/graph.hpp> #include <cugraph/utilities/device_comm.cuh> #include <cugraph/utilities/error.hpp> #include <cugraph/utilities/host_scalar_comm.cuh> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/reduce.h> #include <thrust/transform.h> namespace cugraph { namespace test { template <typename T> void single_gpu_renumber_edgelist_given_number_map(raft::handle_t const& handle, rmm::device_uvector<T>& edgelist_rows_v, rmm::device_uvector<T>& edgelist_cols_v, rmm::device_uvector<T>& renumber_map_gathered_v) { rmm::device_uvector<T> index_v(renumber_map_gathered_v.size(), handle.get_stream()); auto execution_policy = handle.get_thrust_policy(); thrust::for_each( execution_policy, thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(renumber_map_gathered_v.size()), [d_renumber_map_gathered = renumber_map_gathered_v.data(), d_index = index_v.data()] __device__( auto idx) { d_index[d_renumber_map_gathered[idx]] = idx; }); thrust::transform(execution_policy, edgelist_rows_v.begin(), edgelist_rows_v.end(), edgelist_rows_v.begin(), [d_index = index_v.data()] __device__(auto v) { return d_index[v]; }); thrust::transform(execution_policy, edgelist_cols_v.begin(), edgelist_cols_v.end(), edgelist_cols_v.begin(), [d_index = index_v.data()] __device__(auto v) { return d_index[v]; }); } template <typename vertex_t, typename edge_t, typename weight_t> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> compressed_sparse_to_edgelist(edge_t const* compressed_sparse_offsets, vertex_t const* compressed_sparse_indices, std::optional<weight_t const*> compressed_sparse_weights, vertex_t major_first, vertex_t major_last, cudaStream_t stream) { edge_t number_of_edges{0}; raft::update_host( &number_of_edges, compressed_sparse_offsets + (major_last - major_first), 1, stream); CUDA_TRY(cudaStreamSynchronize(stream)); rmm::device_uvector<vertex_t> edgelist_major_vertices(number_of_edges, stream); rmm::device_uvector<vertex_t> edgelist_minor_vertices(number_of_edges, stream); auto edgelist_weights = compressed_sparse_weights ? 
std::make_optional<rmm::device_uvector<weight_t>>(number_of_edges, stream) : std::nullopt; // FIXME: this is highly inefficient for very high-degree vertices, for better performance, we can // fill high-degree vertices using one CUDA block per vertex, mid-degree vertices using one CUDA // warp per vertex, and low-degree vertices using one CUDA thread per block thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(major_first), thrust::make_counting_iterator(major_last), [compressed_sparse_offsets, major_first, p_majors = edgelist_major_vertices.begin()] __device__(auto v) { auto first = compressed_sparse_offsets[v - major_first]; auto last = compressed_sparse_offsets[v - major_first + 1]; thrust::fill(thrust::seq, p_majors + first, p_majors + last, v); }); thrust::copy(rmm::exec_policy(stream), compressed_sparse_indices, compressed_sparse_indices + number_of_edges, edgelist_minor_vertices.begin()); if (compressed_sparse_weights) { thrust::copy(rmm::exec_policy(stream), (*compressed_sparse_weights), (*compressed_sparse_weights) + number_of_edges, (*edgelist_weights).data()); } return std::make_tuple(std::move(edgelist_major_vertices), std::move(edgelist_minor_vertices), std::move(edgelist_weights)); } template <typename vertex_t, typename weight_t> void sort_and_coarsen_edgelist( rmm::device_uvector<vertex_t>& edgelist_major_vertices /* [INOUT] */, rmm::device_uvector<vertex_t>& edgelist_minor_vertices /* [INOUT] */, std::optional<rmm::device_uvector<weight_t>>& edgelist_weights /* [INOUT] */, cudaStream_t stream) { auto pair_first = thrust::make_zip_iterator( thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin())); size_t number_of_edges{0}; if (edgelist_weights) { thrust::sort_by_key(rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size(), (*edgelist_weights).begin()); rmm::device_uvector<vertex_t> tmp_edgelist_major_vertices(edgelist_major_vertices.size(), stream); rmm::device_uvector<vertex_t> tmp_edgelist_minor_vertices(tmp_edgelist_major_vertices.size(), stream); rmm::device_uvector<weight_t> tmp_edgelist_weights(tmp_edgelist_major_vertices.size(), stream); auto it = thrust::reduce_by_key( rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size(), (*edgelist_weights).begin(), thrust::make_zip_iterator(thrust::make_tuple(tmp_edgelist_major_vertices.begin(), tmp_edgelist_minor_vertices.begin())), tmp_edgelist_weights.begin()); number_of_edges = thrust::distance(tmp_edgelist_weights.begin(), thrust::get<1>(it)); edgelist_major_vertices = std::move(tmp_edgelist_major_vertices); edgelist_minor_vertices = std::move(tmp_edgelist_minor_vertices); (*edgelist_weights) = std::move(tmp_edgelist_weights); } else { thrust::sort(rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size()); auto it = thrust::unique( rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size()); number_of_edges = thrust::distance(pair_first, it); } edgelist_major_vertices.resize(number_of_edges, stream); edgelist_minor_vertices.resize(number_of_edges, stream); edgelist_major_vertices.shrink_to_fit(stream); edgelist_minor_vertices.shrink_to_fit(stream); if (edgelist_weights) { (*edgelist_weights).resize(number_of_edges, stream); (*edgelist_weights).shrink_to_fit(stream); } } template <typename vertex_t, typename edge_t, typename weight_t> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> 
compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist( edge_t const* compressed_sparse_offsets, vertex_t const* compressed_sparse_indices, std::optional<weight_t const*> compressed_sparse_weights, vertex_t const* p_major_labels, vertex_t const* p_minor_labels, vertex_t major_first, vertex_t major_last, vertex_t minor_first, vertex_t minor_last, cudaStream_t stream) { // FIXME: it might be possible to directly create relabled & coarsened edgelist from the // compressed sparse format to save memory auto [edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights] = compressed_sparse_to_edgelist(compressed_sparse_offsets, compressed_sparse_indices, compressed_sparse_weights, major_first, major_last, stream); auto pair_first = thrust::make_zip_iterator( thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin())); thrust::transform( rmm::exec_policy(stream), pair_first, pair_first + edgelist_major_vertices.size(), pair_first, [p_major_labels, p_minor_labels, major_first, minor_first] __device__(auto val) { return thrust::make_tuple(p_major_labels[thrust::get<0>(val) - major_first], p_minor_labels[thrust::get<1>(val) - minor_first]); }); sort_and_coarsen_edgelist( edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights, stream); return std::make_tuple(std::move(edgelist_major_vertices), std::move(edgelist_minor_vertices), std::move(edgelist_weights)); } // FIXME: better add "bool renumber" (which must be false in MG) to the coarsen_grpah function // instead of replicating the code here. single-GPU version template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed> std::unique_ptr<cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>> coarsen_graph( raft::handle_t const& handle, cugraph::graph_view_t<vertex_t, edge_t, weight_t, store_transposed, false> const& graph_view, vertex_t const* labels) { auto [coarsened_edgelist_major_vertices, coarsened_edgelist_minor_vertices, coarsened_edgelist_weights] = compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist( graph_view.get_matrix_partition_view().get_offsets(), graph_view.get_matrix_partition_view().get_indices(), graph_view.get_matrix_partition_view().get_weights(), labels, labels, vertex_t{0}, graph_view.get_number_of_vertices(), vertex_t{0}, graph_view.get_number_of_vertices(), handle.get_stream()); cugraph::edgelist_t<vertex_t, edge_t, weight_t> edgelist{}; edgelist.p_src_vertices = store_transposed ? coarsened_edgelist_minor_vertices.data() : coarsened_edgelist_major_vertices.data(); edgelist.p_dst_vertices = store_transposed ? coarsened_edgelist_major_vertices.data() : coarsened_edgelist_minor_vertices.data(); edgelist.p_edge_weights = coarsened_edgelist_weights ? 
std::optional<weight_t const*>{(*coarsened_edgelist_weights).data()} : std::nullopt; edgelist.number_of_edges = static_cast<edge_t>(coarsened_edgelist_major_vertices.size()); vertex_t new_number_of_vertices = 1 + thrust::reduce(handle.get_thrust_policy(), labels, labels + graph_view.get_number_of_vertices(), vertex_t{0}, thrust::maximum<vertex_t>()); return std::make_unique<cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>>( handle, edgelist, cugraph::graph_meta_t<vertex_t, edge_t, false>{ new_number_of_vertices, cugraph::graph_properties_t{graph_view.is_symmetric(), false}, std::nullopt}); } // explicit instantiation template void single_gpu_renumber_edgelist_given_number_map( raft::handle_t const& handle, rmm::device_uvector<int>& d_edgelist_rows, rmm::device_uvector<int>& d_edgelist_cols, rmm::device_uvector<int>& d_renumber_map_gathered_v); template std::unique_ptr<cugraph::graph_t<int32_t, int32_t, float, false, false>> coarsen_graph( raft::handle_t const& handle, cugraph::graph_view_t<int32_t, int32_t, float, false, false> const& graph_view, int32_t const* labels); } // namespace test } // namespace cugraph
46f79151561d71aa56d4bbc76da3b4e6567b4772.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef ACTIVATION_H #define ACTIVATION_H #include <iostream> #include <string> #include <stdio.h> #include "Matrix.hip" // En cada gradiente agregue el termino d_in[i] // No se si esta bien // Se supone que me ahorro un paso con eso porque es el // gradiente de la activacion /* ---------------------------- Activation class ---------------------------- */ class Activation{ private: std::string name; public: Activation(std::string name_); //Default constructor virtual ~Activation(); std::string getName(); virtual void call(Matrix &in, Matrix &out) = 0; virtual void gradient(Matrix &in, Matrix &out) = 0; }; Activation::Activation(std::string name_) : name(name_) {} Activation::~Activation(){} std::string Activation::getName(){ return name; } /* ---------------------------- Sigmoid class and Kernels ---------------------------- */ __device__ __host__ float sigmoid(float x); __global__ void sigmoidKernel(float *d_in, float *d_out, int size); __global__ void sigmoidGradKernel(float *d_in, float *d_out, int size); class Sigmoid : public Activation{ public: Sigmoid(); ~Sigmoid(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Sigmoid::Sigmoid():Activation("Sigmoid") {} Sigmoid::~Sigmoid(){} void Sigmoid::call(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( sigmoidKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } void Sigmoid::gradient(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( sigmoidGradKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } __device__ __host__ float sigmoid(float x){ return 1.0f / (1 + expf(-x)); } __global__ void sigmoidKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = sigmoid(d_in[i]); i += blockDim.x * gridDim.x; } } __global__ void sigmoidGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ float sig = d_in[i] * sigmoid(d_in[i]); d_out[i] = sig * (1.0f - sig); i += blockDim.x * gridDim.x; } } /* ---------------------------- Relu class and Kernels ---------------------------- */ __global__ void reluKernel(float *d_in, float *d_out, int size); __global__ void reluGradKernel(float *d_in, float *d_out, int size); class Relu : public Activation{ public: Relu(); ~Relu(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Relu::Relu():Activation("Relu") {} Relu::~Relu(){} void Relu::call(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > 
deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( reluKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } void Relu::gradient(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( reluGradKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } __global__ void reluKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = fmaxf(d_in[i], 0); i += blockDim.x * gridDim.x; } } __global__ void reluGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ if(d_in[i] > 0) d_out[i] = d_in[i] * 1.0; else d_out[i] = 0.0; i += blockDim.x * gridDim.x; } } /* ---------------------------- Linear class and Kernels ---------------------------- */ __global__ void linearKernel(float *d_in, float *d_out, int size); __global__ void linearGradKernel(float *d_in, float *d_out, int size); class Linear : public Activation{ public: Linear(); ~Linear(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Linear::Linear():Activation("Linear") {} Linear::~Linear(){} void Linear::call(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( linearKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } void Linear::gradient(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( linearGradKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } __global__ void linearKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = d_in[i]; i += blockDim.x * gridDim.x; } } __global__ void linearGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = d_in[i] * 1; i += blockDim.x * gridDim.x; } } /* ---------------------------- Tanh class and Kernels ---------------------------- */ __global__ void tanhKernel(float *d_in, float *d_out, int size); __global__ void tanhGradKernel(float *d_in, float *d_out, int size); class Tanh : public Activation{ public: Tanh(); ~Tanh(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Tanh::Tanh():Activation("Tanh") {} Tanh::~Tanh(){} void Tanh::call(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 
nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( tanhKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } void Tanh::gradient(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( tanhGradKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size); hipDeviceSynchronize(); } __global__ void tanhKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = tanhf(d_in[i]); i += blockDim.x * gridDim.x; } } __global__ void tanhGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = d_in[i] * (1.0f - powf(tanhf(d_in[i]), 2.0f)); i += blockDim.x * gridDim.x; } } /* ---------------------------- LeakyRelu class and Kernels ---------------------------- */ __global__ void leakyReluKernel(float *d_in, float *d_out, int size, float arg); __global__ void leakyReluGradKernel(float *d_in, float *d_out, int size, float arg); class LeakyRelu : public Activation{ private: float arg; public: LeakyRelu(float arg=0.1); ~LeakyRelu(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; LeakyRelu::LeakyRelu(float arg_):Activation("LeakyRelu") {arg = arg_;} LeakyRelu::~LeakyRelu(){} void LeakyRelu::call(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( leakyReluKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size, arg); hipDeviceSynchronize(); } void LeakyRelu::gradient(Matrix &in, Matrix &out){ int dev; hipGetDevice(&dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } hipLaunchKernelGGL(( leakyReluGradKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, in.getDeviceData(), out.getDeviceData(), in.size, arg); hipDeviceSynchronize(); } __global__ void leakyReluKernel(float *d_in, float *d_out, int size, float arg){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = fmaxf(d_in[i], d_in[i]*arg); i += blockDim.x * gridDim.x; } } __global__ void leakyReluGradKernel(float *d_in, float *d_out, int size, float arg){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ if(d_in[i] > 0) d_out[i] = d_in[i] * 1.0; else d_out[i] = d_in[i] * 1.0 * arg; i += blockDim.x * gridDim.x; } } #endif
46f79151561d71aa56d4bbc76da3b4e6567b4772.cu
#ifndef ACTIVATION_H #define ACTIVATION_H #include <iostream> #include <string> #include <stdio.h> #include "Matrix.cu" // En cada gradiente agregue el termino d_in[i] // No se si esta bien // Se supone que me ahorro un paso con eso porque es el // gradiente de la activacion /* ---------------------------- Activation class ---------------------------- */ class Activation{ private: std::string name; public: Activation(std::string name_); //Default constructor virtual ~Activation(); std::string getName(); virtual void call(Matrix &in, Matrix &out) = 0; virtual void gradient(Matrix &in, Matrix &out) = 0; }; Activation::Activation(std::string name_) : name(name_) {} Activation::~Activation(){} std::string Activation::getName(){ return name; } /* ---------------------------- Sigmoid class and Kernels ---------------------------- */ __device__ __host__ float sigmoid(float x); __global__ void sigmoidKernel(float *d_in, float *d_out, int size); __global__ void sigmoidGradKernel(float *d_in, float *d_out, int size); class Sigmoid : public Activation{ public: Sigmoid(); ~Sigmoid(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Sigmoid::Sigmoid():Activation("Sigmoid") {} Sigmoid::~Sigmoid(){} void Sigmoid::call(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } sigmoidKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); cudaDeviceSynchronize(); } void Sigmoid::gradient(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } sigmoidGradKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); cudaDeviceSynchronize(); } __device__ __host__ float sigmoid(float x){ return 1.0f / (1 + expf(-x)); } __global__ void sigmoidKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = sigmoid(d_in[i]); i += blockDim.x * gridDim.x; } } __global__ void sigmoidGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ float sig = d_in[i] * sigmoid(d_in[i]); d_out[i] = sig * (1.0f - sig); i += blockDim.x * gridDim.x; } } /* ---------------------------- Relu class and Kernels ---------------------------- */ __global__ void reluKernel(float *d_in, float *d_out, int size); __global__ void reluGradKernel(float *d_in, float *d_out, int size); class Relu : public Activation{ public: Relu(); ~Relu(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Relu::Relu():Activation("Relu") {} Relu::~Relu(){} void Relu::call(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } reluKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); 
cudaDeviceSynchronize(); } void Relu::gradient(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } reluGradKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); cudaDeviceSynchronize(); } __global__ void reluKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = fmaxf(d_in[i], 0); i += blockDim.x * gridDim.x; } } __global__ void reluGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ if(d_in[i] > 0) d_out[i] = d_in[i] * 1.0; else d_out[i] = 0.0; i += blockDim.x * gridDim.x; } } /* ---------------------------- Linear class and Kernels ---------------------------- */ __global__ void linearKernel(float *d_in, float *d_out, int size); __global__ void linearGradKernel(float *d_in, float *d_out, int size); class Linear : public Activation{ public: Linear(); ~Linear(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Linear::Linear():Activation("Linear") {} Linear::~Linear(){} void Linear::call(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } linearKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); cudaDeviceSynchronize(); } void Linear::gradient(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } linearGradKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); cudaDeviceSynchronize(); } __global__ void linearKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = d_in[i]; i += blockDim.x * gridDim.x; } } __global__ void linearGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = d_in[i] * 1; i += blockDim.x * gridDim.x; } } /* ---------------------------- Tanh class and Kernels ---------------------------- */ __global__ void tanhKernel(float *d_in, float *d_out, int size); __global__ void tanhGradKernel(float *d_in, float *d_out, int size); class Tanh : public Activation{ public: Tanh(); ~Tanh(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; Tanh::Tanh():Activation("Tanh") {} Tanh::~Tanh(){} void Tanh::call(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } tanhKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); cudaDeviceSynchronize(); } void 
Tanh::gradient(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } tanhGradKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size); cudaDeviceSynchronize(); } __global__ void tanhKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = tanhf(d_in[i]); i += blockDim.x * gridDim.x; } } __global__ void tanhGradKernel(float *d_in, float *d_out, int size){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = d_in[i] * (1.0f - powf(tanhf(d_in[i]), 2.0f)); i += blockDim.x * gridDim.x; } } /* ---------------------------- LeakyRelu class and Kernels ---------------------------- */ __global__ void leakyReluKernel(float *d_in, float *d_out, int size, float arg); __global__ void leakyReluGradKernel(float *d_in, float *d_out, int size, float arg); class LeakyRelu : public Activation{ private: float arg; public: LeakyRelu(float arg=0.1); ~LeakyRelu(); void call(Matrix &in, Matrix &out); void gradient(Matrix &in, Matrix &out); }; LeakyRelu::LeakyRelu(float arg_):Activation("LeakyRelu") {arg = arg_;} LeakyRelu::~LeakyRelu(){} void LeakyRelu::call(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } leakyReluKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size, arg); cudaDeviceSynchronize(); } void LeakyRelu::gradient(Matrix &in, Matrix &out){ int dev; cudaGetDevice(&dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); // dim3 nThreads(256); dim3 nThreads(deviceProp.maxThreadsDim[0]); dim3 nBlocks((in.size + nThreads.x - 1) / nThreads.x); if(nBlocks.x > deviceProp.maxGridSize[0]){ nBlocks.x = deviceProp.maxGridSize[0]; } leakyReluGradKernel<<< nBlocks, nThreads >>>(in.getDeviceData(), out.getDeviceData(), in.size, arg); cudaDeviceSynchronize(); } __global__ void leakyReluKernel(float *d_in, float *d_out, int size, float arg){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ d_out[i] = fmaxf(d_in[i], d_in[i]*arg); i += blockDim.x * gridDim.x; } } __global__ void leakyReluGradKernel(float *d_in, float *d_out, int size, float arg){ int i = blockIdx.x * blockDim.x + threadIdx.x; while(i < size){ if(d_in[i] > 0) d_out[i] = d_in[i] * 1.0; else d_out[i] = d_in[i] * 1.0 * arg; i += blockDim.x * gridDim.x; } } #endif
6e4d8157bb371c5cf3464c3f2eda05194ad2aa1a.hip
// !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"

namespace StreamCompaction {
    namespace Naive {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }
        // TODO:
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        __global__ void kernAdvanceScan(int n, int offset, int* a, int* b) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            if (idx >= offset) {
                b[idx] = a[idx - offset] + a[idx];
            }
            else {
                b[idx] = a[idx];
            }
        }

        void scan(int n, int *odata, const int *idata, int *cudaA, int *cudaB) {
            dim3 fullBlocksPerGrid((n + blocksize - 1) / blocksize);
            int kmax = ilog2ceil(n);

            timer().startGpuTimer();
            for (int k = 1; k <= kmax; ++k) {
                // invoke kernel
                int offset = (int)pow(2, k - 1);
                hipLaunchKernelGGL((kernAdvanceScan), dim3(fullBlocksPerGrid), dim3(blocksize), 0, 0, n - 1, offset, cudaA, cudaB);

                // pointer swap
                int *temp = cudaA;
                cudaA = cudaB;
                cudaB = temp;
            }
            timer().endGpuTimer();

            hipMemcpy(odata + 1, cudaA, (n - 1) * sizeof(int), hipMemcpyDeviceToHost);
            odata[0] = 0;
        }
    }
}
6e4d8157bb371c5cf3464c3f2eda05194ad2aa1a.cu
#define GLM_FORCE_CUDA
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"

namespace StreamCompaction {
    namespace Naive {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }
        // TODO:
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        __global__ void kernAdvanceScan(int n, int offset, int* a, int* b) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            if (idx >= offset) {
                b[idx] = a[idx - offset] + a[idx];
            }
            else {
                b[idx] = a[idx];
            }
        }

        void scan(int n, int *odata, const int *idata, int *cudaA, int *cudaB) {
            dim3 fullBlocksPerGrid((n + blocksize - 1) / blocksize);
            int kmax = ilog2ceil(n);

            timer().startGpuTimer();
            for (int k = 1; k <= kmax; ++k) {
                // invoke kernel
                int offset = (int)pow(2, k - 1);
                kernAdvanceScan<<<fullBlocksPerGrid, blocksize>>>(n - 1, offset, cudaA, cudaB);

                // pointer swap
                int *temp = cudaA;
                cudaA = cudaB;
                cudaB = temp;
            }
            timer().endGpuTimer();

            cudaMemcpy(odata + 1, cudaA, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
            odata[0] = 0;
        }
    }
}
bb7e3a75c69382aa60c642780998c8c7642f1313.hip
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHThrustAllocator.cuh"
#include "THHApply.cuh"

#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif

template <typename Dtype, typename Acctype>
struct mse_functor
{
  mse_functor() {}

  __host__ __device__ Acctype operator()(const Dtype &x, const Dtype &y) const
  {
    Acctype z = ScalarConvert<Dtype, Acctype>::to(x)-y;
    return z*z;
  }
};

template <typename Dtype>
struct mse_updateOutput_functor
{
  mse_updateOutput_functor() {}

  __device__ void operator()(
      const Dtype *input,
      const Dtype *target,
      Dtype *output)
  {
    Dtype diff = THCNumerics<Dtype>::sub(*input, *target);
    *output = THCNumerics<Dtype>::mul(diff, diff);
  }
};

template <typename Dtype, typename Acctype>
struct mse_updateGradInput_functor
{
  const Acctype norm;

  mse_updateGradInput_functor(Acctype norm_) : norm(norm_) {}

  __host__ __device__ Dtype operator()(const Dtype &x, const Dtype &y) const
  {
    return ScalarConvert<Acctype, Dtype>::to(norm * (ScalarConvert<Dtype, Acctype>::to(x) - y));
  }
};

#include "generic/MSECriterion.cu"
#include "THHGenerateFloatTypes.h"
bb7e3a75c69382aa60c642780998c8c7642f1313.cu
#include "THCUNN.h" #include "common.h" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include "THCThrustAllocator.cuh" #include "THCApply.cuh" #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #if CUDA_VERSION >= 7000 #include <thrust/system/cuda/execution_policy.h> #endif template <typename Dtype, typename Acctype> struct mse_functor { mse_functor() {} __host__ __device__ Acctype operator()(const Dtype &x, const Dtype &y) const { Acctype z = ScalarConvert<Dtype, Acctype>::to(x)-y; return z*z; } }; template <typename Dtype> struct mse_updateOutput_functor { mse_updateOutput_functor() {} __device__ void operator()( const Dtype *input, const Dtype *target, Dtype *output) { Dtype diff = THCNumerics<Dtype>::sub(*input, *target); *output = THCNumerics<Dtype>::mul(diff, diff); } }; template <typename Dtype, typename Acctype> struct mse_updateGradInput_functor { const Acctype norm; mse_updateGradInput_functor(Acctype norm_) : norm(norm_) {} __host__ __device__ Dtype operator()(const Dtype &x, const Dtype &y) const { return ScalarConvert<Acctype, Dtype>::to(norm * (ScalarConvert<Dtype, Acctype>::to(x) - y)); } }; #include "generic/MSECriterion.cu" #include "THCGenerateFloatTypes.h"
d19b8b3d801098062a92308fc17380e642ccf995.hip
// !!! This is a file automatically generated by hipify!!!
#include "FreeImage.h"
#include "stdio.h"

#define DIM 2000

struct hipComplex {
    float r;
    float i;
    hipComplex(float a, float b) : r(a), i(b) {};
    float magnitude2(void) { return r * r + i * i; }
    hipComplex operator*(const hipComplex& a) {
        return hipComplex(r * a.r - i * a.i, i * a.r + r * a.i);
    }
    hipComplex operator+(const hipComplex& a) {
        return hipComplex(r + a.r, i + a.i);
    }
};

int julia(int x, int y) {
    const float scale = 1.5;
    float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
    float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);

    hipComplex c(-0.8, 0.156);
    hipComplex a(jx, jy);

    int i = 0;
    for (i = 0; i < 200; i++) {
        a = a * a + c;
        if (a.magnitude2() > 1000)
            return 0;
    }
    return 1;
}

void kernel(char* ptr) {
    for (int y = 0; y < DIM; y++)
        for (int x = 0; x < DIM; x++) {
            int offset = x + y * DIM;
            ptr[offset] = julia(x, y);
        }
}

int main() {
    FreeImage_Initialise();
    atexit(FreeImage_DeInitialise);

    FIBITMAP* bitmap = FreeImage_Allocate(DIM, DIM, 24);
    char charmap[DIM][DIM];
    kernel(&charmap[0][0]);

    RGBQUAD color;
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            color.rgbRed = 0;
            color.rgbGreen = 0;
            color.rgbBlue = 0;
            if (charmap[i][j] == 1)
                color.rgbGreen = 255.0;
            FreeImage_SetPixelColor(bitmap, i, j, &color);
        }
    }

    FreeImage_Save(FIF_PNG, bitmap, "output.png", 0);
    FreeImage_Unload(bitmap);
    return 0;
}
d19b8b3d801098062a92308fc17380e642ccf995.cu
#include "FreeImage.h" #include "stdio.h" #define DIM 2000 struct cuComplex{ float r; float i; cuComplex( float a, float b) : r(a), i(b) {}; float magnitude2(void) { return r*r + i*i; } cuComplex operator*(const cuComplex& a) { return cuComplex( r * a.r - i * a.i, i * a.r + r * a.i ); } cuComplex operator+(const cuComplex& a) { return cuComplex(r+ a.r, i + a.i); } }; int julia(int x, int y) { const float scale = 1.5; float jx = scale * (float)(DIM / 2 - x) / (DIM / 2); float jy = scale * (float)(DIM / 2 - y) / (DIM / 2); cuComplex c(-0.8, 0.156); cuComplex a(jx, jy); int i = 0; for (i = 0; i < 200; i++) { a = a * a + c; if (a.magnitude2() > 1000) return 0; } return 1; } void kernel(char* ptr) { for (int y = 0; y < DIM; y++) for (int x = 0; x < DIM; x++) { int offset = x + y * DIM; ptr[offset] = julia(x, y); } } int main() { FreeImage_Initialise(); atexit(FreeImage_DeInitialise); FIBITMAP* bitmap = FreeImage_Allocate(DIM, DIM, 24); char charmap[DIM][DIM]; kernel(&charmap[0][0]); RGBQUAD color; for (int i = 0; i < DIM; i++) { for (int j = 0; j < DIM; j++) { color.rgbRed = 0; color.rgbGreen = 0; color.rgbBlue = 0; if (charmap[i][j] == 1) color.rgbGreen = 255.0; FreeImage_SetPixelColor(bitmap, i, j, &color); } } FreeImage_Save(FIF_PNG, bitmap, "output.png", 0); FreeImage_Unload(bitmap); return 0; }
8a9b3ae46112fdad40c756757bf31f539f9e6ddb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/layers/reorg_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void reorg_kernel(const Dtype *x, int w, int h, int c, int batch, int stride, int forward, Dtype *out) { int size = batch * c * h * w; int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if(i >= size) return; int in_index = i; int in_w = i % w; i = i / w; int in_h = i % h; i = i / h; int in_c = i % c; i = i / c; int b = i % batch; int out_c = c / (stride * stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w * stride + offset % stride; int h2 = in_h * stride + offset / stride; int out_index = w2 + w * stride * (h2 + h * stride * (c2 + out_c * b)); if (forward) { out[out_index] = x[in_index]; } else { out[in_index] = x[out_index]; } } template<typename Dtype> void ReorgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { const Dtype *bottom_data = bottom[0]->gpu_data(); int count = bottom[0]->count(); Dtype *top_data = top[0]->mutable_gpu_data(); hipLaunchKernelGGL(( reorg_kernel<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_data, width_, height_, channels_, batch_num_, stride_, reverse_, top_data); } template<typename Dtype> void ReorgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { if (!propagate_down[0]) { return; } int count = diff_.count(); const Dtype *top_diff = diff_.mutable_gpu_diff(); Dtype *bottom_diff = bottom[0]->mutable_gpu_diff(); hipLaunchKernelGGL(( reorg_kernel<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top_diff, width_, height_, channels_, batch_num_, stride_, !reverse_, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(ReorgLayer); } // namespace caffe
8a9b3ae46112fdad40c756757bf31f539f9e6ddb.cu
#include "caffe/layers/reorg_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void reorg_kernel(const Dtype *x, int w, int h, int c, int batch, int stride, int forward, Dtype *out) { int size = batch * c * h * w; int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if(i >= size) return; int in_index = i; int in_w = i % w; i = i / w; int in_h = i % h; i = i / h; int in_c = i % c; i = i / c; int b = i % batch; int out_c = c / (stride * stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w * stride + offset % stride; int h2 = in_h * stride + offset / stride; int out_index = w2 + w * stride * (h2 + h * stride * (c2 + out_c * b)); if (forward) { out[out_index] = x[in_index]; } else { out[in_index] = x[out_index]; } } template<typename Dtype> void ReorgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { const Dtype *bottom_data = bottom[0]->gpu_data(); int count = bottom[0]->count(); Dtype *top_data = top[0]->mutable_gpu_data(); reorg_kernel<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(bottom_data, width_, height_, channels_, batch_num_, stride_, reverse_, top_data); } template<typename Dtype> void ReorgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { if (!propagate_down[0]) { return; } int count = diff_.count(); const Dtype *top_diff = diff_.mutable_gpu_diff(); Dtype *bottom_diff = bottom[0]->mutable_gpu_diff(); reorg_kernel<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(top_diff, width_, height_, channels_, batch_num_, stride_, !reverse_, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(ReorgLayer); } // namespace caffe
19358aff349c4071339e1ca59da9b3302ef50b4a.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com All rights reserved. https://github.com/robertwgh/cuLDPC Implementation of LDPC decoding algorithm. The details of implementation can be found from the following papers: 1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE. 2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE. The current release is close to the GlobalSIP2013 paper. */ #include <stdio.h> #include <stdlib.h> #include <memory.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include "LDPC.h" #include "matrix.h" #include "kernel.hip" float sigma ; int *info_bin ; int main() { printf("GPU LDPC Decoder\r\nComputing...\r\n"); // For cnp kernel #if MODE == WIMAX const char h_element_count1[BLK_ROW] = {6, 7, 7, 6, 6, 7, 6, 6, 7, 6, 6, 6}; const char h_element_count2[BLK_COL] = {3, 3, 6, 3, 3, 6, 3, 6, 3, 6, 3, 6, \ 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}; #else const char h_element_count1[BLK_ROW] = {7, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 8}; const char h_element_count2[BLK_COL] = {11,4, 3, 3,11, 3, 3, 3,11, 3, 3, 3, \ 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}; #endif h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R h_element h_element_temp; // init the compact matrix for(int i = 0; i < H_COMPACT1_COL; i++) { for(int j = 0; j < H_COMPACT1_ROW; j ++) { h_element_temp.x = 0; h_element_temp.y = 0; h_element_temp.value = -1; h_element_temp.valid = 0; h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column } } // scan the h matrix, and gengerate compact mode of h for(int i = 0; i < BLK_ROW; i++) { int k = 0; for(int j = 0; j < BLK_COL; j ++) { if(h_base[i][j] != -1) { h_element_temp.x = i; h_element_temp.y = j; h_element_temp.value = h_base[i][j]; h_element_temp.valid = 1; h_compact1[k][i] = h_element_temp; k++; } } // printf("row %d, #element=%d\n", i, k); } // h_compact2 h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr // init the compact matrix for(int i = 0; i < H_COMPACT2_ROW; i++) { for(int j = 0; j < H_COMPACT2_COL; j ++) { h_element_temp.x = 0; h_element_temp.y = 0; h_element_temp.value = -1; h_element_temp.valid = 0; h_compact2[i][j] = h_element_temp; } } for(int j = 0; j < BLK_COL; j++) { int k = 0; for(int i = 0; i < BLK_ROW; i ++) { if(h_base[i][j] != -1) { // although h is transposed, the (x,y) is still (iBlkRow, iBlkCol) h_element_temp.x = i; h_element_temp.y = j; h_element_temp.value = h_base[i][j]; h_element_temp.valid = 1; h_compact2[k][j] = h_element_temp; k++; } } } //int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int); int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element); int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element); int memorySize_infobits = INFO_LEN * sizeof(int); int memorySize_codeword = CODEWORD_LEN * sizeof(int); int memorySize_llr = CODEWORD_LEN * sizeof(float); info_bin = (int *) malloc(memorySize_infobits) ; int *codeword = (int *) malloc(memorySize_codeword) ; float *trans = (float *) malloc(memorySize_llr) ; float *recv = (float *) malloc(memorySize_llr) ; float *llr = (float *) malloc(memorySize_llr) ; float 
rate = (float)0.5f; ////////////////////////////////////////////////////////////////////////////////// // all the variables Starting with _gpu is used in host code and for gpu computation int memorySize_infobits_gpu = MCW * CW * memorySize_infobits ; int memorySize_llr_gpu = MCW * CW * CODEWORD_LEN * sizeof(float); int memorySize_dt_gpu = MCW * CW * ROW * BLK_COL * sizeof(float); int memorySize_R_gpu = MCW * CW * ROW * BLK_COL * sizeof(float); int memorySize_hard_decision_gpu = MCW * CW * CODEWORD_LEN * sizeof(int); int *info_bin_gpu; float *llr_gpu; int * hard_decision_gpu; info_bin_gpu = (int *) malloc(memorySize_infobits_gpu); hard_decision_gpu = (int *) malloc(memorySize_hard_decision_gpu); llr_gpu = (float *) malloc(memorySize_llr_gpu); error_result this_error; int total_frame_error = 0; int total_bit_error = 0; int total_codeword = 0; // create device memory float * dev_llr; float * dev_dt; float * dev_R; int * dev_hard_decision; h_element * dev_h_compact1; h_element * dev_h_compact2; char * dev_h_element_count1; char * dev_h_element_count2; hipMalloc((void **)&dev_llr, memorySize_llr_gpu); hipMalloc((void **)&dev_dt, memorySize_dt_gpu); hipMalloc((void **)&dev_R, memorySize_R_gpu); hipMalloc((void **)&dev_hard_decision, memorySize_hard_decision_gpu); hipMalloc((void **)&dev_h_compact1, memorySize_h_compact1); hipMalloc((void **)&dev_h_compact2, memorySize_h_compact2); hipMalloc((void **)&dev_h_element_count1, BLK_ROW); hipMalloc((void **)&dev_h_element_count2, BLK_COL); hipMemcpy(dev_h_element_count1, h_element_count1, BLK_ROW, hipMemcpyHostToDevice); hipMemcpy(dev_h_element_count2, h_element_count2, BLK_COL, hipMemcpyHostToDevice); hipMemcpy(dev_h_compact1, h_compact1, memorySize_h_compact1, hipMemcpyHostToDevice); hipMemcpy(dev_h_compact2, h_compact2, memorySize_h_compact2, hipMemcpyHostToDevice); srand(69012); for(int snri = 0; snri < NUM_SNR; snri++) { float snr = snr_array[snri]; sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f))); total_codeword = 0; total_frame_error = 0; total_bit_error = 0; // Adjust MIN_CODWORD in LDPC.h to reduce simulation time while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD)) { total_codeword += CW * MCW; for(int i = 0; i < CW * MCW; i++) { // generate random data info_gen (info_bin, rand()); // encode the data structure_encode (info_bin, codeword, h_base); // BPSK modulation modulation (codeword, trans); // additive white Gaussian noise awgn (trans, recv, rand()); // LLR init llr_init (llr, recv); // copy the info_bin and llr to the total memory memcpy(info_bin_gpu + i * INFO_LEN, info_bin, memorySize_infobits); memcpy(llr_gpu + i * CODEWORD_LEN, llr, memorySize_llr); } // Define kernel dimension dim3 dimGridKernel1(BLK_ROW, MCW, 1); // dim of the thread blocks dim3 dimBlockKernel1(BLOCK_SIZE_X, CW, 1); int sharedRCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT * sizeof(float); dim3 dimGridKernel2(BLK_COL, MCW, 1); dim3 dimBlockKernel2(BLOCK_SIZE_X, CW, 1); //int sharedDtCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT_VNP * sizeof(float); // run the kernel float total_time = 0.f; for(int j = 0; j < MAX_SIM; j++) { // Transfer LLR data into device. hipMemcpy(dev_llr, llr_gpu, memorySize_llr_gpu, hipMemcpyHostToDevice); // kernel launch hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for(int ii = 0; ii < MAX_ITERATION; ii++) { // run check-node processing kernel // TODO: run a special kernel the first iteration? 
if(ii == 0) { hipLaunchKernelGGL(( ldpc_cnp_kernel_1st_iter), dim3(dimGridKernel1), dim3(dimBlockKernel1), 0, 0, dev_llr, dev_dt, dev_R, dev_h_element_count1, dev_h_compact1); } else { hipLaunchKernelGGL(( ldpc_cnp_kernel), dim3(dimGridKernel1), dim3(dimBlockKernel1), sharedRCacheSize, 0, dev_llr, dev_dt, dev_R, dev_h_element_count1, dev_h_compact1); } // run variable-node processing kernel // for the last iteration we run a special // kernel. this is because we can make a hard // decision instead of writing back the belief // for the value of each bit. if(ii < MAX_ITERATION - 1) { hipLaunchKernelGGL(( ldpc_vnp_kernel_normal), dim3(dimGridKernel2), dim3(dimBlockKernel2), 0, 0, dev_llr, dev_dt, dev_h_element_count2, dev_h_compact2); } else { hipLaunchKernelGGL(( ldpc_vnp_kernel_last_iter), dim3(dimGridKernel2), dim3(dimBlockKernel2), 0, 0, dev_llr, dev_dt, dev_hard_decision, dev_h_element_count2, dev_h_compact2); } } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); total_time += time; // copy the decoded data from device to host hipMemcpy(hard_decision_gpu, dev_hard_decision, memorySize_hard_decision_gpu, hipMemcpyDeviceToHost); this_error = error_check(info_bin_gpu, hard_decision_gpu); total_bit_error += this_error.bit_error; total_frame_error += this_error.frame_error; } // end of MAX-SIM printf ("\n"); printf ("Total kernel execution time: %f (s)\n", total_time * 1e-9f); printf ("# codewords = %d, CW=%d, MCW=%d\n",total_codeword, CW, MCW); printf ("total bit error = %d\n", total_bit_error); printf ("total frame error = %d\n", total_frame_error); printf ("BER = %1.2e, FER = %1.2e\n", (float) total_bit_error/total_codeword/INFO_LEN, (float) total_frame_error/total_codeword); } // end of the MAX frame error. }// end of the snr loop hipFree(dev_llr); hipFree(dev_dt); hipFree(dev_R); hipFree(dev_hard_decision); hipFree(dev_h_compact1); hipFree(dev_h_compact2); hipFree(dev_h_element_count1); hipFree(dev_h_element_count2); free(info_bin); free(codeword); free(trans); free(recv); free(llr); free(llr_gpu); free(hard_decision_gpu); free(info_bin_gpu); return 0; }
19358aff349c4071339e1ca59da9b3302ef50b4a.cu
/* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com All rights reserved. https://github.com/robertwgh/cuLDPC Implementation of LDPC decoding algorithm. The details of implementation can be found from the following papers: 1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE. 2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE. The current release is close to the GlobalSIP2013 paper. */ #include <stdio.h> #include <stdlib.h> #include <memory.h> #include <math.h> #include <chrono> #include <cuda.h> #include "LDPC.h" #include "matrix.h" #include "kernel.cu" float sigma ; int *info_bin ; int main() { printf("GPU LDPC Decoder\r\nComputing...\r\n"); // For cnp kernel #if MODE == WIMAX const char h_element_count1[BLK_ROW] = {6, 7, 7, 6, 6, 7, 6, 6, 7, 6, 6, 6}; const char h_element_count2[BLK_COL] = {3, 3, 6, 3, 3, 6, 3, 6, 3, 6, 3, 6, \ 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}; #else const char h_element_count1[BLK_ROW] = {7, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 8}; const char h_element_count2[BLK_COL] = {11,4, 3, 3,11, 3, 3, 3,11, 3, 3, 3, \ 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}; #endif h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R h_element h_element_temp; // init the compact matrix for(int i = 0; i < H_COMPACT1_COL; i++) { for(int j = 0; j < H_COMPACT1_ROW; j ++) { h_element_temp.x = 0; h_element_temp.y = 0; h_element_temp.value = -1; h_element_temp.valid = 0; h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column } } // scan the h matrix, and gengerate compact mode of h for(int i = 0; i < BLK_ROW; i++) { int k = 0; for(int j = 0; j < BLK_COL; j ++) { if(h_base[i][j] != -1) { h_element_temp.x = i; h_element_temp.y = j; h_element_temp.value = h_base[i][j]; h_element_temp.valid = 1; h_compact1[k][i] = h_element_temp; k++; } } // printf("row %d, #element=%d\n", i, k); } // h_compact2 h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr // init the compact matrix for(int i = 0; i < H_COMPACT2_ROW; i++) { for(int j = 0; j < H_COMPACT2_COL; j ++) { h_element_temp.x = 0; h_element_temp.y = 0; h_element_temp.value = -1; h_element_temp.valid = 0; h_compact2[i][j] = h_element_temp; } } for(int j = 0; j < BLK_COL; j++) { int k = 0; for(int i = 0; i < BLK_ROW; i ++) { if(h_base[i][j] != -1) { // although h is transposed, the (x,y) is still (iBlkRow, iBlkCol) h_element_temp.x = i; h_element_temp.y = j; h_element_temp.value = h_base[i][j]; h_element_temp.valid = 1; h_compact2[k][j] = h_element_temp; k++; } } } //int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int); int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element); int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element); int memorySize_infobits = INFO_LEN * sizeof(int); int memorySize_codeword = CODEWORD_LEN * sizeof(int); int memorySize_llr = CODEWORD_LEN * sizeof(float); info_bin = (int *) malloc(memorySize_infobits) ; int *codeword = (int *) malloc(memorySize_codeword) ; float *trans = (float *) malloc(memorySize_llr) ; float *recv = (float *) malloc(memorySize_llr) ; float *llr = (float *) malloc(memorySize_llr) ; float rate = (float)0.5f; 
////////////////////////////////////////////////////////////////////////////////// // all the variables Starting with _gpu is used in host code and for gpu computation int memorySize_infobits_gpu = MCW * CW * memorySize_infobits ; int memorySize_llr_gpu = MCW * CW * CODEWORD_LEN * sizeof(float); int memorySize_dt_gpu = MCW * CW * ROW * BLK_COL * sizeof(float); int memorySize_R_gpu = MCW * CW * ROW * BLK_COL * sizeof(float); int memorySize_hard_decision_gpu = MCW * CW * CODEWORD_LEN * sizeof(int); int *info_bin_gpu; float *llr_gpu; int * hard_decision_gpu; info_bin_gpu = (int *) malloc(memorySize_infobits_gpu); hard_decision_gpu = (int *) malloc(memorySize_hard_decision_gpu); llr_gpu = (float *) malloc(memorySize_llr_gpu); error_result this_error; int total_frame_error = 0; int total_bit_error = 0; int total_codeword = 0; // create device memory float * dev_llr; float * dev_dt; float * dev_R; int * dev_hard_decision; h_element * dev_h_compact1; h_element * dev_h_compact2; char * dev_h_element_count1; char * dev_h_element_count2; cudaMalloc((void **)&dev_llr, memorySize_llr_gpu); cudaMalloc((void **)&dev_dt, memorySize_dt_gpu); cudaMalloc((void **)&dev_R, memorySize_R_gpu); cudaMalloc((void **)&dev_hard_decision, memorySize_hard_decision_gpu); cudaMalloc((void **)&dev_h_compact1, memorySize_h_compact1); cudaMalloc((void **)&dev_h_compact2, memorySize_h_compact2); cudaMalloc((void **)&dev_h_element_count1, BLK_ROW); cudaMalloc((void **)&dev_h_element_count2, BLK_COL); cudaMemcpy(dev_h_element_count1, h_element_count1, BLK_ROW, cudaMemcpyHostToDevice); cudaMemcpy(dev_h_element_count2, h_element_count2, BLK_COL, cudaMemcpyHostToDevice); cudaMemcpy(dev_h_compact1, h_compact1, memorySize_h_compact1, cudaMemcpyHostToDevice); cudaMemcpy(dev_h_compact2, h_compact2, memorySize_h_compact2, cudaMemcpyHostToDevice); srand(69012); for(int snri = 0; snri < NUM_SNR; snri++) { float snr = snr_array[snri]; sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f))); total_codeword = 0; total_frame_error = 0; total_bit_error = 0; // Adjust MIN_CODWORD in LDPC.h to reduce simulation time while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD)) { total_codeword += CW * MCW; for(int i = 0; i < CW * MCW; i++) { // generate random data info_gen (info_bin, rand()); // encode the data structure_encode (info_bin, codeword, h_base); // BPSK modulation modulation (codeword, trans); // additive white Gaussian noise awgn (trans, recv, rand()); // LLR init llr_init (llr, recv); // copy the info_bin and llr to the total memory memcpy(info_bin_gpu + i * INFO_LEN, info_bin, memorySize_infobits); memcpy(llr_gpu + i * CODEWORD_LEN, llr, memorySize_llr); } // Define kernel dimension dim3 dimGridKernel1(BLK_ROW, MCW, 1); // dim of the thread blocks dim3 dimBlockKernel1(BLOCK_SIZE_X, CW, 1); int sharedRCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT * sizeof(float); dim3 dimGridKernel2(BLK_COL, MCW, 1); dim3 dimBlockKernel2(BLOCK_SIZE_X, CW, 1); //int sharedDtCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT_VNP * sizeof(float); // run the kernel float total_time = 0.f; for(int j = 0; j < MAX_SIM; j++) { // Transfer LLR data into device. cudaMemcpy(dev_llr, llr_gpu, memorySize_llr_gpu, cudaMemcpyHostToDevice); // kernel launch cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for(int ii = 0; ii < MAX_ITERATION; ii++) { // run check-node processing kernel // TODO: run a special kernel the first iteration? 
if(ii == 0) { ldpc_cnp_kernel_1st_iter<<< dimGridKernel1, dimBlockKernel1>>> (dev_llr, dev_dt, dev_R, dev_h_element_count1, dev_h_compact1); } else { ldpc_cnp_kernel<<< dimGridKernel1, dimBlockKernel1, sharedRCacheSize>>> (dev_llr, dev_dt, dev_R, dev_h_element_count1, dev_h_compact1); } // run variable-node processing kernel // for the last iteration we run a special // kernel. this is because we can make a hard // decision instead of writing back the belief // for the value of each bit. if(ii < MAX_ITERATION - 1) { ldpc_vnp_kernel_normal<<< dimGridKernel2, dimBlockKernel2>>> (dev_llr, dev_dt, dev_h_element_count2, dev_h_compact2); } else { ldpc_vnp_kernel_last_iter<<< dimGridKernel2, dimBlockKernel2>>> (dev_llr, dev_dt, dev_hard_decision, dev_h_element_count2, dev_h_compact2); } } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); total_time += time; // copy the decoded data from device to host cudaMemcpy(hard_decision_gpu, dev_hard_decision, memorySize_hard_decision_gpu, cudaMemcpyDeviceToHost); this_error = error_check(info_bin_gpu, hard_decision_gpu); total_bit_error += this_error.bit_error; total_frame_error += this_error.frame_error; } // end of MAX-SIM printf ("\n"); printf ("Total kernel execution time: %f (s)\n", total_time * 1e-9f); printf ("# codewords = %d, CW=%d, MCW=%d\n",total_codeword, CW, MCW); printf ("total bit error = %d\n", total_bit_error); printf ("total frame error = %d\n", total_frame_error); printf ("BER = %1.2e, FER = %1.2e\n", (float) total_bit_error/total_codeword/INFO_LEN, (float) total_frame_error/total_codeword); } // end of the MAX frame error. }// end of the snr loop cudaFree(dev_llr); cudaFree(dev_dt); cudaFree(dev_R); cudaFree(dev_hard_decision); cudaFree(dev_h_compact1); cudaFree(dev_h_compact2); cudaFree(dev_h_element_count1); cudaFree(dev_h_element_count2); free(info_bin); free(codeword); free(trans); free(recv); free(llr); free(llr_gpu); free(hard_decision_gpu); free(info_bin_gpu); return 0; }
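One practical caveat with the decode loop above: none of the kernel launches is checked, so an invalid launch configuration (too many threads per block, too much dynamic shared memory) fails silently when MCW, CW, or BLOCK_SIZE_X are changed. A minimal check macro along the following lines can be placed after each launch and after the final cudaDeviceSynchronize(); the macro name is illustrative and not part of cuLDPC.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Minimal launch/sync error check (illustrative helper, not part of cuLDPC).
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err__ = (call);                                           \
        if (err__ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err__), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Usage sketch after a kernel launch:
//   ldpc_cnp_kernel<<<dimGridKernel1, dimBlockKernel1, sharedRCacheSize>>>(...);
//   CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
//   ...
//   CUDA_CHECK(cudaDeviceSynchronize());   // catches asynchronous execution errors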
10f599b48bf70e799a8a3847334b3e8a17cfe6f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "main.cuh" int main(int argc, char** argv){ std::string image_filename("images/Carre.pgm"); int iter = 100; int size = 1024*1024; if ( argc > 2 ) { image_filename = argv[1]; iter = atoi(argv[2]); } if( argc > 3 ){ size = atoi(argv[3]); size = size*size; std::cout << "Dotp vec size : " << size << std::endl; } srand(time(NULL)); unsigned width, height; get_source_params(image_filename, &height, &width); std::cout << "Image Dimensions: " << width << "x" << height << std::endl; u_char **Source; image<u_char> imgSource(height, width, &Source); auto fail = init_source_image(Source, image_filename, height, width); if (fail) { std::cout << "Chargement impossible de l'image" << std::endl; return 0; } float *vec1, *vec2; vec1 = (float*)malloc(size*sizeof(float)); vec2 = (float*)malloc(size*sizeof(float)); for (int i = 0; i < size; i++) { vec1[i] = rand_float(); vec2[i] = rand_float(); } #ifdef EX1 std::cout << "Exercice 1 : Dot Product\n====================================" << std::endl; run_exercice1(vec1,vec2, size, BLOCKDIM_X, iter); #endif #ifdef EX2 std::cout << "\nExercice 2 : Sobel Filter\n====================================" << std::endl; run_exercice2(Source, width, height, iter); #endif #ifdef EX3 std::cout << "\nExercice 3 : Matrix Transposition\n====================================" << std::endl; run_exercice3(Source, width, height, iter); #endif #ifdef EX4 std::cout << "\nExercice 4: Image Histogram\n====================================" << std::endl; run_exercice4(Source, height, width, iter); #endif free(vec1); free(vec2); } void run_exercice1(float* vec1, float* vec2, int size, int k, int iter){ timer avg_times = { 0 }; timer times = { 0 }; float res_cpu = 0; float res_gpu = 0; float *d_vec1CUDA, *d_vec2CUDA, *d_resCUDA; for( int i = 0; i < iter; i++){ hipMalloc(&d_vec1CUDA, size*sizeof(float)); hipMalloc(&d_vec2CUDA, size*sizeof(float)); hipMalloc(&d_resCUDA, sizeof(float)); // GPU Benchmark times.gpu_time_total = omp_get_wtime(); hipMemcpy(d_vec1CUDA, vec1, size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_vec2CUDA, vec2, size*sizeof(float), hipMemcpyHostToDevice); // Kernel benchmark times.gpu_time_kernel = omp_get_wtime(); hipLaunchKernelGGL(( gpu_dotp_kernel), dim3((size+k)/k), dim3(k), 0, 0, size, d_vec1CUDA, d_vec2CUDA, d_resCUDA); times.gpu_time_kernel = omp_get_wtime() - times.gpu_time_kernel; avg_times.gpu_time_kernel += times.gpu_time_kernel; hipMemcpy(&res_gpu, d_resCUDA, sizeof(float), hipMemcpyDeviceToHost); times.gpu_time_total = omp_get_wtime() - times.gpu_time_total; avg_times.gpu_time_total += times.gpu_time_total; // CPU Benchmark times.cpu_time = omp_get_wtime(); res_cpu = cpu_dotp(vec1,vec2,size); times.cpu_time = omp_get_wtime() - times.cpu_time; avg_times.cpu_time += times.cpu_time; hipFree(d_vec1CUDA); hipFree(d_vec2CUDA); hipFree(d_resCUDA); } // Check if(res_cpu != res_gpu){ std::cout << "Cpu dot prod: " << res_cpu << std::endl; std::cout << "Gpu dot prod: " << res_gpu << std::endl; std::cout << "Absolute Error: " << fabs(res_cpu-res_gpu) << std::endl; std::cout << "Error: dot product result different" << std::endl; } timer_avg(&avg_times, iter, size); display_timer(avg_times); } void run_exercice2(u_char** Source, unsigned width, unsigned height, int iter){ u_char** ResultatCPU, **ResultatGPU, **ResultatGPUShared; u_char *d_ResultatCUDAShared, *d_ResultatCUDA, *d_SourceCUDA; image<u_char> imgResultatCPU(height, width, &ResultatCPU); image<u_char> 
imgResultatGPU(height, width, &ResultatGPU); image<u_char> imgResultatGPUShared(height, width, &ResultatGPUShared); timer times_naive = { 0 }; timer times_shared = { 0 }; timer avg_times_naive = { 0 }; timer avg_times_shared = { 0 }; for( int i = 0; i < iter; i++){ hipMalloc(&d_SourceCUDA, height*width*sizeof(u_char)); hipMalloc(&d_ResultatCUDA, height*width*sizeof(u_char)); hipMalloc(&d_ResultatCUDAShared, height*width*sizeof(u_char)); dim3 threads(BLOCKDIM_X,BLOCKDIM_Y); dim3 blocks(width/BLOCKDIM_X,height/BLOCKDIM_Y); // GPU Naive Benchmark times_naive.gpu_time_total = omp_get_wtime(); hipMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), hipMemcpyHostToDevice); times_naive.gpu_time_kernel = omp_get_wtime(); hipLaunchKernelGGL(( gpu_sobel_kernel_naive), dim3(blocks),dim3(threads), 0, 0, d_SourceCUDA, d_ResultatCUDA, width, height); times_naive.gpu_time_kernel = omp_get_wtime() - times_naive.gpu_time_kernel; avg_times_naive.gpu_time_kernel += times_naive.gpu_time_kernel; hipMemcpy(ResultatGPU[0], d_ResultatCUDA, height*width*sizeof(u_char), hipMemcpyDeviceToHost); times_naive.gpu_time_total = omp_get_wtime() - times_naive.gpu_time_total; avg_times_naive.gpu_time_total += times_naive.gpu_time_total; dim3 blocks2(width/(BLOCKDIM_X-2),height/(BLOCKDIM_Y-2)); // GPU Shared Benchmark times_shared.gpu_time_total = omp_get_wtime(); hipMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), hipMemcpyHostToDevice); times_shared.gpu_time_kernel = omp_get_wtime(); hipLaunchKernelGGL(( gpu_sobel_kernel_shared), dim3(blocks2),dim3(threads), 0, 0, d_SourceCUDA, d_ResultatCUDAShared, width, height); times_shared.gpu_time_kernel = omp_get_wtime() - times_shared.gpu_time_kernel; avg_times_shared.gpu_time_kernel += times_shared.gpu_time_kernel; hipMemcpy(ResultatGPUShared[0], d_ResultatCUDAShared, height*width*sizeof(u_char), hipMemcpyDeviceToHost); times_shared.gpu_time_total = omp_get_wtime() - times_shared.gpu_time_total; avg_times_shared.gpu_time_total += times_shared.gpu_time_total; // CPU Benchmark times_naive.cpu_time = omp_get_wtime(); cpu_sobel(Source, ResultatCPU, width, height); times_naive.cpu_time = omp_get_wtime() - times_naive.cpu_time; times_shared.cpu_time = times_naive.cpu_time; avg_times_naive.cpu_time += times_naive.cpu_time; avg_times_shared.cpu_time += times_shared.cpu_time; hipFree(d_SourceCUDA); hipFree(d_ResultatCUDA); hipFree(d_ResultatCUDAShared); } std::string image_filename=std::string("images/Resultats/Sobel_cpu.pgm"); save_gray_level_image(&imgResultatCPU, image_filename, height, width); image_filename=std::string("images/Resultats/Sobel_gpu.pgm"); save_gray_level_image(&imgResultatGPU, image_filename, height, width); image_filename=std::string("images/Resultats/Sobel_gpu_shared.pgm"); save_gray_level_image(&imgResultatGPUShared, image_filename, height, width); std::cout << std::endl << "Naive GPU Algorithm (not coalescent):" << std::endl; timer_avg(&avg_times_naive, iter, height*width); display_timer(avg_times_naive); std::cout << std::endl << "Shared GPU Algorithm (coalescent):" << std::endl; timer_avg(&avg_times_shared, iter, height*width); display_timer(avg_times_shared); } void run_exercice3(u_char** Source, unsigned width, unsigned height, int iter){ u_char** ResultatCPU, **ResultatGPU, **ResultatGPUShared; u_char *d_ResultatCUDAShared, *d_ResultatCUDA, *d_SourceCUDA; image<u_char> imgResultatCPU(width, height, &ResultatCPU); image<u_char> imgResultatGPU(width, height, &ResultatGPU); image<u_char> imgResultatGPUShared(width, height, &ResultatGPUShared); 
timer times_naive = { 0 }; timer times_shared = { 0 }; timer avg_times_naive = { 0 }; timer avg_times_shared = { 0 }; for( int i = 0; i < iter; i++){ hipMalloc(&d_SourceCUDA, height*width*sizeof(u_char)); hipMalloc(&d_ResultatCUDA, height*width*sizeof(u_char)); hipMalloc(&d_ResultatCUDAShared, height*width*sizeof(u_char)); dim3 threads(BLOCKDIM_X,BLOCKDIM_Y); dim3 blocks((width/BLOCKDIM_X)+1,(height/BLOCKDIM_Y)+1); // GPU Naive Benchmark times_naive.gpu_time_total = omp_get_wtime(); hipMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), hipMemcpyHostToDevice); times_naive.gpu_time_kernel = omp_get_wtime(); hipLaunchKernelGGL(( gpu_transpo_kernel_naive), dim3(blocks),dim3(threads), 0, 0, d_SourceCUDA, d_ResultatCUDA, width, height); times_naive.gpu_time_kernel = omp_get_wtime() - times_naive.gpu_time_kernel; avg_times_naive.gpu_time_kernel += times_naive.gpu_time_kernel; hipMemcpy(ResultatGPU[0], d_ResultatCUDA, height*width*sizeof(u_char), hipMemcpyDeviceToHost); times_naive.gpu_time_total = omp_get_wtime() - times_naive.gpu_time_total; avg_times_naive.gpu_time_total += times_naive.gpu_time_total; // GPU Shared Benchmark times_shared.gpu_time_total = omp_get_wtime(); hipMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), hipMemcpyHostToDevice); times_shared.gpu_time_kernel = omp_get_wtime(); hipLaunchKernelGGL(( gpu_transpo_kernel_shared), dim3(blocks),dim3(threads), 0, 0, d_SourceCUDA, d_ResultatCUDAShared, width, height); times_shared.gpu_time_kernel = omp_get_wtime() - times_shared.gpu_time_kernel; avg_times_shared.gpu_time_kernel += times_shared.gpu_time_kernel; hipMemcpy(ResultatGPUShared[0], d_ResultatCUDAShared, height*width*sizeof(u_char), hipMemcpyDeviceToHost); times_shared.gpu_time_total = omp_get_wtime() - times_shared.gpu_time_total; avg_times_shared.gpu_time_total += times_shared.gpu_time_total; // CPU Benchmark times_naive.cpu_time = omp_get_wtime(); cpu_transpo(Source, ResultatCPU, width, height); times_naive.cpu_time = omp_get_wtime() - times_naive.cpu_time; times_shared.cpu_time = times_naive.cpu_time; avg_times_naive.cpu_time += times_naive.cpu_time; avg_times_shared.cpu_time += times_shared.cpu_time; hipFree(d_SourceCUDA); hipFree(d_ResultatCUDA); hipFree(d_ResultatCUDAShared); } std::string image_filename=std::string("images/Resultats/Transpo_cpu.pgm"); save_gray_level_image(&imgResultatCPU, image_filename, width, height); image_filename=std::string("images/Resultats/Transpo_gpu.pgm"); save_gray_level_image(&imgResultatGPU, image_filename, width, height); image_filename=std::string("images/Resultats/Transpo_gpu_shared.pgm"); save_gray_level_image(&imgResultatGPUShared, image_filename, width, height); std::cout << std::endl << "Naive GPU Algorithm (not coalescent):" << std::endl; timer_avg(&avg_times_naive, iter, height*width); display_timer(avg_times_naive); std::cout << std::endl << "Shared GPU Algorithm (coalescent):" << std::endl; timer_avg(&avg_times_shared, iter, height*width); display_timer(avg_times_shared); } void run_exercice4(u_char** Source, unsigned height, unsigned width, int iter) { timer times_naive = { 0 }; timer avg_times_naive = { 0 }; timer times_shared = { 0 }; timer avg_times_shared = { 0 }; int resCPU[256] = { 0 }; int resGPU[256] = { 0 }; int resGPUShared[256] = { 0 }; u_char *d_SourceCUDA; int *d_resCUDA, *d_resCUDAShared; hipMalloc(&d_SourceCUDA, height*width*sizeof(u_char)); hipMalloc(&d_resCUDA, 256*sizeof(int)); hipMalloc(&d_resCUDAShared, 256*sizeof(int)); dim3 threads(BLOCKDIM_X,BLOCKDIM_Y); dim3 
blocks((width/BLOCKDIM_X)+1,(height/BLOCKDIM_Y)+1); // GPU Naive Benchmark times_naive.gpu_time_total = omp_get_wtime(); hipMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), hipMemcpyHostToDevice); times_naive.gpu_time_kernel = omp_get_wtime(); hipLaunchKernelGGL(( gpu_histo_kernel_naive), dim3(blocks),dim3(threads), 0, 0, d_SourceCUDA, d_resCUDA, height, width); times_naive.gpu_time_kernel = omp_get_wtime() - times_naive.gpu_time_kernel; avg_times_naive.gpu_time_kernel += times_naive.gpu_time_kernel; hipMemcpy(resGPU, d_resCUDA, 256*sizeof(int), hipMemcpyDeviceToHost); times_naive.gpu_time_total = omp_get_wtime() - times_naive.gpu_time_total; avg_times_naive.gpu_time_total += times_naive.gpu_time_total; // GPU Shared Benchmark times_shared.gpu_time_total = omp_get_wtime(); hipMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), hipMemcpyHostToDevice); times_shared.gpu_time_kernel = omp_get_wtime(); hipLaunchKernelGGL(( gpu_histo_kernel_shared), dim3(blocks),dim3(threads), 0, 0, d_SourceCUDA, d_resCUDAShared, height, width); times_shared.gpu_time_kernel = omp_get_wtime() - times_shared.gpu_time_kernel; avg_times_shared.gpu_time_kernel += times_shared.gpu_time_kernel; hipMemcpy(resGPUShared, d_resCUDAShared, 256*sizeof(int), hipMemcpyDeviceToHost); times_shared.gpu_time_total = omp_get_wtime() - times_shared.gpu_time_total; avg_times_shared.gpu_time_total += times_shared.gpu_time_total; times_naive.cpu_time = omp_get_wtime(); cpu_histo(Source, &resCPU, height, width); times_naive.cpu_time = omp_get_wtime() - times_naive.cpu_time; times_shared.cpu_time = times_naive.cpu_time; avg_times_naive.cpu_time += times_naive.cpu_time; avg_times_shared.cpu_time += times_shared.cpu_time; hipFree(d_SourceCUDA); hipFree(d_resCUDA); hipFree(d_resCUDAShared); std::cout << std::endl << "Naive GPU Algorithm:" << std::endl; timer_avg(&avg_times_naive, iter, height*width); display_timer(avg_times_naive); std::cout << std::endl << "Shared GPU Algorithm:" << std::endl; timer_avg(&avg_times_shared, iter, height*width); display_timer(avg_times_shared); // display_vec(resCPU, 256, g_int); // display_vec(resGPU, 256, g_int); // display_vec(resGPUShared, 256, g_int); }
10f599b48bf70e799a8a3847334b3e8a17cfe6f5.cu
#include "main.cuh" int main(int argc, char** argv){ std::string image_filename("images/Carre.pgm"); int iter = 100; int size = 1024*1024; if ( argc > 2 ) { image_filename = argv[1]; iter = atoi(argv[2]); } if( argc > 3 ){ size = atoi(argv[3]); size = size*size; std::cout << "Dotp vec size : " << size << std::endl; } srand(time(NULL)); unsigned width, height; get_source_params(image_filename, &height, &width); std::cout << "Image Dimensions: " << width << "x" << height << std::endl; u_char **Source; image<u_char> imgSource(height, width, &Source); auto fail = init_source_image(Source, image_filename, height, width); if (fail) { std::cout << "Chargement impossible de l'image" << std::endl; return 0; } float *vec1, *vec2; vec1 = (float*)malloc(size*sizeof(float)); vec2 = (float*)malloc(size*sizeof(float)); for (int i = 0; i < size; i++) { vec1[i] = rand_float(); vec2[i] = rand_float(); } #ifdef EX1 std::cout << "Exercice 1 : Dot Product\n====================================" << std::endl; run_exercice1(vec1,vec2, size, BLOCKDIM_X, iter); #endif #ifdef EX2 std::cout << "\nExercice 2 : Sobel Filter\n====================================" << std::endl; run_exercice2(Source, width, height, iter); #endif #ifdef EX3 std::cout << "\nExercice 3 : Matrix Transposition\n====================================" << std::endl; run_exercice3(Source, width, height, iter); #endif #ifdef EX4 std::cout << "\nExercice 4: Image Histogram\n====================================" << std::endl; run_exercice4(Source, height, width, iter); #endif free(vec1); free(vec2); } void run_exercice1(float* vec1, float* vec2, int size, int k, int iter){ timer avg_times = { 0 }; timer times = { 0 }; float res_cpu = 0; float res_gpu = 0; float *d_vec1CUDA, *d_vec2CUDA, *d_resCUDA; for( int i = 0; i < iter; i++){ cudaMalloc(&d_vec1CUDA, size*sizeof(float)); cudaMalloc(&d_vec2CUDA, size*sizeof(float)); cudaMalloc(&d_resCUDA, sizeof(float)); // GPU Benchmark times.gpu_time_total = omp_get_wtime(); cudaMemcpy(d_vec1CUDA, vec1, size*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_vec2CUDA, vec2, size*sizeof(float), cudaMemcpyHostToDevice); // Kernel benchmark times.gpu_time_kernel = omp_get_wtime(); gpu_dotp_kernel<<<(size+k)/k, k>>>(size, d_vec1CUDA, d_vec2CUDA, d_resCUDA); times.gpu_time_kernel = omp_get_wtime() - times.gpu_time_kernel; avg_times.gpu_time_kernel += times.gpu_time_kernel; cudaMemcpy(&res_gpu, d_resCUDA, sizeof(float), cudaMemcpyDeviceToHost); times.gpu_time_total = omp_get_wtime() - times.gpu_time_total; avg_times.gpu_time_total += times.gpu_time_total; // CPU Benchmark times.cpu_time = omp_get_wtime(); res_cpu = cpu_dotp(vec1,vec2,size); times.cpu_time = omp_get_wtime() - times.cpu_time; avg_times.cpu_time += times.cpu_time; cudaFree(d_vec1CUDA); cudaFree(d_vec2CUDA); cudaFree(d_resCUDA); } // Check if(res_cpu != res_gpu){ std::cout << "Cpu dot prod: " << res_cpu << std::endl; std::cout << "Gpu dot prod: " << res_gpu << std::endl; std::cout << "Absolute Error: " << fabs(res_cpu-res_gpu) << std::endl; std::cout << "Error: dot product result different" << std::endl; } timer_avg(&avg_times, iter, size); display_timer(avg_times); } void run_exercice2(u_char** Source, unsigned width, unsigned height, int iter){ u_char** ResultatCPU, **ResultatGPU, **ResultatGPUShared; u_char *d_ResultatCUDAShared, *d_ResultatCUDA, *d_SourceCUDA; image<u_char> imgResultatCPU(height, width, &ResultatCPU); image<u_char> imgResultatGPU(height, width, &ResultatGPU); image<u_char> imgResultatGPUShared(height, width, &ResultatGPUShared); timer 
times_naive = { 0 }; timer times_shared = { 0 }; timer avg_times_naive = { 0 }; timer avg_times_shared = { 0 }; for( int i = 0; i < iter; i++){ cudaMalloc(&d_SourceCUDA, height*width*sizeof(u_char)); cudaMalloc(&d_ResultatCUDA, height*width*sizeof(u_char)); cudaMalloc(&d_ResultatCUDAShared, height*width*sizeof(u_char)); dim3 threads(BLOCKDIM_X,BLOCKDIM_Y); dim3 blocks(width/BLOCKDIM_X,height/BLOCKDIM_Y); // GPU Naive Benchmark times_naive.gpu_time_total = omp_get_wtime(); cudaMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), cudaMemcpyHostToDevice); times_naive.gpu_time_kernel = omp_get_wtime(); gpu_sobel_kernel_naive<<<blocks,threads>>>(d_SourceCUDA, d_ResultatCUDA, width, height); times_naive.gpu_time_kernel = omp_get_wtime() - times_naive.gpu_time_kernel; avg_times_naive.gpu_time_kernel += times_naive.gpu_time_kernel; cudaMemcpy(ResultatGPU[0], d_ResultatCUDA, height*width*sizeof(u_char), cudaMemcpyDeviceToHost); times_naive.gpu_time_total = omp_get_wtime() - times_naive.gpu_time_total; avg_times_naive.gpu_time_total += times_naive.gpu_time_total; dim3 blocks2(width/(BLOCKDIM_X-2),height/(BLOCKDIM_Y-2)); // GPU Shared Benchmark times_shared.gpu_time_total = omp_get_wtime(); cudaMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), cudaMemcpyHostToDevice); times_shared.gpu_time_kernel = omp_get_wtime(); gpu_sobel_kernel_shared<<<blocks2,threads>>>(d_SourceCUDA, d_ResultatCUDAShared, width, height); times_shared.gpu_time_kernel = omp_get_wtime() - times_shared.gpu_time_kernel; avg_times_shared.gpu_time_kernel += times_shared.gpu_time_kernel; cudaMemcpy(ResultatGPUShared[0], d_ResultatCUDAShared, height*width*sizeof(u_char), cudaMemcpyDeviceToHost); times_shared.gpu_time_total = omp_get_wtime() - times_shared.gpu_time_total; avg_times_shared.gpu_time_total += times_shared.gpu_time_total; // CPU Benchmark times_naive.cpu_time = omp_get_wtime(); cpu_sobel(Source, ResultatCPU, width, height); times_naive.cpu_time = omp_get_wtime() - times_naive.cpu_time; times_shared.cpu_time = times_naive.cpu_time; avg_times_naive.cpu_time += times_naive.cpu_time; avg_times_shared.cpu_time += times_shared.cpu_time; cudaFree(d_SourceCUDA); cudaFree(d_ResultatCUDA); cudaFree(d_ResultatCUDAShared); } std::string image_filename=std::string("images/Resultats/Sobel_cpu.pgm"); save_gray_level_image(&imgResultatCPU, image_filename, height, width); image_filename=std::string("images/Resultats/Sobel_gpu.pgm"); save_gray_level_image(&imgResultatGPU, image_filename, height, width); image_filename=std::string("images/Resultats/Sobel_gpu_shared.pgm"); save_gray_level_image(&imgResultatGPUShared, image_filename, height, width); std::cout << std::endl << "Naive GPU Algorithm (not coalescent):" << std::endl; timer_avg(&avg_times_naive, iter, height*width); display_timer(avg_times_naive); std::cout << std::endl << "Shared GPU Algorithm (coalescent):" << std::endl; timer_avg(&avg_times_shared, iter, height*width); display_timer(avg_times_shared); } void run_exercice3(u_char** Source, unsigned width, unsigned height, int iter){ u_char** ResultatCPU, **ResultatGPU, **ResultatGPUShared; u_char *d_ResultatCUDAShared, *d_ResultatCUDA, *d_SourceCUDA; image<u_char> imgResultatCPU(width, height, &ResultatCPU); image<u_char> imgResultatGPU(width, height, &ResultatGPU); image<u_char> imgResultatGPUShared(width, height, &ResultatGPUShared); timer times_naive = { 0 }; timer times_shared = { 0 }; timer avg_times_naive = { 0 }; timer avg_times_shared = { 0 }; for( int i = 0; i < iter; i++){ cudaMalloc(&d_SourceCUDA, 
height*width*sizeof(u_char)); cudaMalloc(&d_ResultatCUDA, height*width*sizeof(u_char)); cudaMalloc(&d_ResultatCUDAShared, height*width*sizeof(u_char)); dim3 threads(BLOCKDIM_X,BLOCKDIM_Y); dim3 blocks((width/BLOCKDIM_X)+1,(height/BLOCKDIM_Y)+1); // GPU Naive Benchmark times_naive.gpu_time_total = omp_get_wtime(); cudaMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), cudaMemcpyHostToDevice); times_naive.gpu_time_kernel = omp_get_wtime(); gpu_transpo_kernel_naive<<<blocks,threads>>>(d_SourceCUDA, d_ResultatCUDA, width, height); times_naive.gpu_time_kernel = omp_get_wtime() - times_naive.gpu_time_kernel; avg_times_naive.gpu_time_kernel += times_naive.gpu_time_kernel; cudaMemcpy(ResultatGPU[0], d_ResultatCUDA, height*width*sizeof(u_char), cudaMemcpyDeviceToHost); times_naive.gpu_time_total = omp_get_wtime() - times_naive.gpu_time_total; avg_times_naive.gpu_time_total += times_naive.gpu_time_total; // GPU Shared Benchmark times_shared.gpu_time_total = omp_get_wtime(); cudaMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), cudaMemcpyHostToDevice); times_shared.gpu_time_kernel = omp_get_wtime(); gpu_transpo_kernel_shared<<<blocks,threads>>>(d_SourceCUDA, d_ResultatCUDAShared, width, height); times_shared.gpu_time_kernel = omp_get_wtime() - times_shared.gpu_time_kernel; avg_times_shared.gpu_time_kernel += times_shared.gpu_time_kernel; cudaMemcpy(ResultatGPUShared[0], d_ResultatCUDAShared, height*width*sizeof(u_char), cudaMemcpyDeviceToHost); times_shared.gpu_time_total = omp_get_wtime() - times_shared.gpu_time_total; avg_times_shared.gpu_time_total += times_shared.gpu_time_total; // CPU Benchmark times_naive.cpu_time = omp_get_wtime(); cpu_transpo(Source, ResultatCPU, width, height); times_naive.cpu_time = omp_get_wtime() - times_naive.cpu_time; times_shared.cpu_time = times_naive.cpu_time; avg_times_naive.cpu_time += times_naive.cpu_time; avg_times_shared.cpu_time += times_shared.cpu_time; cudaFree(d_SourceCUDA); cudaFree(d_ResultatCUDA); cudaFree(d_ResultatCUDAShared); } std::string image_filename=std::string("images/Resultats/Transpo_cpu.pgm"); save_gray_level_image(&imgResultatCPU, image_filename, width, height); image_filename=std::string("images/Resultats/Transpo_gpu.pgm"); save_gray_level_image(&imgResultatGPU, image_filename, width, height); image_filename=std::string("images/Resultats/Transpo_gpu_shared.pgm"); save_gray_level_image(&imgResultatGPUShared, image_filename, width, height); std::cout << std::endl << "Naive GPU Algorithm (not coalescent):" << std::endl; timer_avg(&avg_times_naive, iter, height*width); display_timer(avg_times_naive); std::cout << std::endl << "Shared GPU Algorithm (coalescent):" << std::endl; timer_avg(&avg_times_shared, iter, height*width); display_timer(avg_times_shared); } void run_exercice4(u_char** Source, unsigned height, unsigned width, int iter) { timer times_naive = { 0 }; timer avg_times_naive = { 0 }; timer times_shared = { 0 }; timer avg_times_shared = { 0 }; int resCPU[256] = { 0 }; int resGPU[256] = { 0 }; int resGPUShared[256] = { 0 }; u_char *d_SourceCUDA; int *d_resCUDA, *d_resCUDAShared; cudaMalloc(&d_SourceCUDA, height*width*sizeof(u_char)); cudaMalloc(&d_resCUDA, 256*sizeof(int)); cudaMalloc(&d_resCUDAShared, 256*sizeof(int)); dim3 threads(BLOCKDIM_X,BLOCKDIM_Y); dim3 blocks((width/BLOCKDIM_X)+1,(height/BLOCKDIM_Y)+1); // GPU Naive Benchmark times_naive.gpu_time_total = omp_get_wtime(); cudaMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), cudaMemcpyHostToDevice); times_naive.gpu_time_kernel = omp_get_wtime(); 
gpu_histo_kernel_naive<<<blocks,threads>>>(d_SourceCUDA, d_resCUDA, height, width); times_naive.gpu_time_kernel = omp_get_wtime() - times_naive.gpu_time_kernel; avg_times_naive.gpu_time_kernel += times_naive.gpu_time_kernel; cudaMemcpy(resGPU, d_resCUDA, 256*sizeof(int), cudaMemcpyDeviceToHost); times_naive.gpu_time_total = omp_get_wtime() - times_naive.gpu_time_total; avg_times_naive.gpu_time_total += times_naive.gpu_time_total; // GPU Shared Benchmark times_shared.gpu_time_total = omp_get_wtime(); cudaMemcpy(d_SourceCUDA, Source[0], height*width*sizeof(u_char), cudaMemcpyHostToDevice); times_shared.gpu_time_kernel = omp_get_wtime(); gpu_histo_kernel_shared<<<blocks,threads>>>(d_SourceCUDA, d_resCUDAShared, height, width); times_shared.gpu_time_kernel = omp_get_wtime() - times_shared.gpu_time_kernel; avg_times_shared.gpu_time_kernel += times_shared.gpu_time_kernel; cudaMemcpy(resGPUShared, d_resCUDAShared, 256*sizeof(int), cudaMemcpyDeviceToHost); times_shared.gpu_time_total = omp_get_wtime() - times_shared.gpu_time_total; avg_times_shared.gpu_time_total += times_shared.gpu_time_total; times_naive.cpu_time = omp_get_wtime(); cpu_histo(Source, &resCPU, height, width); times_naive.cpu_time = omp_get_wtime() - times_naive.cpu_time; times_shared.cpu_time = times_naive.cpu_time; avg_times_naive.cpu_time += times_naive.cpu_time; avg_times_shared.cpu_time += times_shared.cpu_time; cudaFree(d_SourceCUDA); cudaFree(d_resCUDA); cudaFree(d_resCUDAShared); std::cout << std::endl << "Naive GPU Algorithm:" << std::endl; timer_avg(&avg_times_naive, iter, height*width); display_timer(avg_times_naive); std::cout << std::endl << "Shared GPU Algorithm:" << std::endl; timer_avg(&avg_times_shared, iter, height*width); display_timer(avg_times_shared); // display_vec(resCPU, 256, g_int); // display_vec(resGPU, 256, g_int); // display_vec(resGPUShared, 256, g_int); }
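A note on the timing methodology above: gpu_time_kernel is sampled with omp_get_wtime() immediately after each asynchronous kernel launch, so it largely measures launch overhead rather than execution time (gpu_time_total remains meaningful because the synchronous cudaMemcpy waits for the kernel). A hedged alternative, consistent with the cudaEvent-based timing used in the sphere example later in this file set, is sketched below; the helper name is hypothetical.

#include <cuda_runtime.h>

// Times an already-configured kernel launch with CUDA events (returns milliseconds).
// Sketch only: the caller performs the launch inside the callable.
template <typename Launch>
float time_kernel_ms(Launch launch) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    launch();                          // e.g. the sobel/transpo/histo kernel launch
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);        // wait until the kernel has finished
    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}

// Usage sketch with identifiers from run_exercice2 above:
//   float ms = time_kernel_ms([&]{
//       gpu_sobel_kernel_naive<<<blocks, threads>>>(d_SourceCUDA, d_ResultatCUDA, width, height);
//   });
//   times_naive.gpu_time_kernel = ms * 1e-3;   // seconds, to match the other timers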
27991c864b0ff704fa2d3e7436b051d4bf6bba51.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "hip/hip_runtime.h" #include "../common/book.h" #include "../common/cpu_bitmap.h" #define DIM 1024 #define rnd( x ) (x * rand() / RAND_MAX) #define INF 2e10f struct Sphere { float r,b,g; float radius; float x,y,z; __device__ float hit( float ox, float oy, float *n ) { float dx = ox - x; float dy = oy - y; if (dx*dx + dy*dy < radius*radius) { float dz = sqrtf( radius*radius - dx*dx - dy*dy ); *n = dz / sqrtf( radius * radius ); return dz + z; } return -INF; } }; #define SPHERES 20 __constant__ Sphere s[SPHERES]; __global__ void kernel( unsigned char *ptr ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float ox = (x - DIM/2); float oy = (y - DIM/2); float r=0, g=0, b=0; float maxz = -INF; for(int i=0; i<SPHERES; i++) { float n; float t = s[i].hit( ox, oy, &n ); if (t > maxz) { float fscale = n; r = s[i].r * fscale; g = s[i].g * fscale; b = s[i].b * fscale; maxz = t; } } ptr[offset*4 + 0] = (int)(r * 255); ptr[offset*4 + 1] = (int)(g * 255); ptr[offset*4 + 2] = (int)(b * 255); ptr[offset*4 + 3] = 255; } // globals needed by the update routine struct DataBlock { unsigned char *dev_bitmap; }; int main( void ) { DataBlock data; // capture the start time hipEvent_t start, stop; HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); HANDLE_ERROR( hipEventRecord( start, 0 ) ); CPUBitmap bitmap( DIM, DIM, &data ); unsigned char *dev_bitmap; // allocate memory on the GPU for the output bitmap HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap, bitmap.image_size() ) ); // allocate temp memory, initialize it, copy to constant // memory on the GPU, then free our temp memory Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES ); for (int i=0; i<SPHERES; i++) { temp_s[i].r = rnd( 1.0f ); temp_s[i].g = rnd( 1.0f ); temp_s[i].b = rnd( 1.0f ); temp_s[i].x = rnd( 1000.0f ) - 500; temp_s[i].y = rnd( 1000.0f ) - 500; temp_s[i].z = rnd( 1000.0f ) - 500; temp_s[i].radius = rnd( 100.0f ) + 20; } HANDLE_ERROR( hipMemcpyToSymbol( s, temp_s, sizeof(Sphere) * SPHERES) ); free( temp_s ); // generate a bitmap from our sphere data dim3 grids(DIM/16,DIM/16); dim3 threads(16,16); hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, dev_bitmap ); // copy our bitmap back from the GPU for display HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost ) ); // get stop time, and display the timing results HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time to generate: %3.1f ms\n", elapsedTime ); HANDLE_ERROR( hipEventDestroy( start ) ); HANDLE_ERROR( hipEventDestroy( stop ) ); HANDLE_ERROR( hipFree( dev_bitmap ) ); // display 
bitmap.display_and_exit(); }
27991c864b0ff704fa2d3e7436b051d4bf6bba51.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "cuda.h" #include "../common/book.h" #include "../common/cpu_bitmap.h" #define DIM 1024 #define rnd( x ) (x * rand() / RAND_MAX) #define INF 2e10f struct Sphere { float r,b,g; float radius; float x,y,z; __device__ float hit( float ox, float oy, float *n ) { float dx = ox - x; float dy = oy - y; if (dx*dx + dy*dy < radius*radius) { float dz = sqrtf( radius*radius - dx*dx - dy*dy ); *n = dz / sqrtf( radius * radius ); return dz + z; } return -INF; } }; #define SPHERES 20 __constant__ Sphere s[SPHERES]; __global__ void kernel( unsigned char *ptr ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float ox = (x - DIM/2); float oy = (y - DIM/2); float r=0, g=0, b=0; float maxz = -INF; for(int i=0; i<SPHERES; i++) { float n; float t = s[i].hit( ox, oy, &n ); if (t > maxz) { float fscale = n; r = s[i].r * fscale; g = s[i].g * fscale; b = s[i].b * fscale; maxz = t; } } ptr[offset*4 + 0] = (int)(r * 255); ptr[offset*4 + 1] = (int)(g * 255); ptr[offset*4 + 2] = (int)(b * 255); ptr[offset*4 + 3] = 255; } // globals needed by the update routine struct DataBlock { unsigned char *dev_bitmap; }; int main( void ) { DataBlock data; // capture the start time cudaEvent_t start, stop; HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); HANDLE_ERROR( cudaEventRecord( start, 0 ) ); CPUBitmap bitmap( DIM, DIM, &data ); unsigned char *dev_bitmap; // allocate memory on the GPU for the output bitmap HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() ) ); // allocate temp memory, initialize it, copy to constant // memory on the GPU, then free our temp memory Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES ); for (int i=0; i<SPHERES; i++) { temp_s[i].r = rnd( 1.0f ); temp_s[i].g = rnd( 1.0f ); temp_s[i].b = rnd( 1.0f ); temp_s[i].x = rnd( 1000.0f ) - 500; temp_s[i].y = rnd( 1000.0f ) - 500; temp_s[i].z = rnd( 1000.0f ) - 500; temp_s[i].radius = rnd( 100.0f ) + 20; } HANDLE_ERROR( cudaMemcpyToSymbol( s, temp_s, sizeof(Sphere) * SPHERES) ); free( temp_s ); // generate a bitmap from our sphere data dim3 grids(DIM/16,DIM/16); dim3 threads(16,16); kernel<<<grids,threads>>>( dev_bitmap ); // copy our bitmap back from the GPU for display HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost ) ); // get stop time, and display the timing results HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time to generate: %3.1f ms\n", elapsedTime ); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); HANDLE_ERROR( cudaFree( dev_bitmap ) ); // display bitmap.display_and_exit(); }
7a0cdad3a9372af98ae2983c03bd9d8f1362b520.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "stream_compaction/common.h" #include "stream_compaction/efficient.h" #include "stream_compaction/radix.h" typedef int var_t; #define blocksize 128 namespace StreamCompaction { namespace Radix { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } template<typename T> T findMax(T* &arr, T n) { T max = 0; for (int i = 0; i < n; ++i) { if (arr[i] > max) { max = arr[i]; } } return max; } __global__ void getB(const int n, const int t, int *idata, int *odata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } odata[idx] = ((idata[idx] >> t) & 1); } __global__ void getE(const int n, const int t, int *bdata, int *edata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } edata[idx] = (bdata[idx] ^ 1); } __global__ void getTF(const int n, int *edata, int *fdata, int *odata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } odata[idx] = edata[idx] + fdata[idx]; } __global__ void getT(const int n, const int tf, int *fdata, int *odata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } odata[idx] = idx - fdata[idx] + tf; } __global__ void getD(const int n, int *bdata, int * tdata, int * fdata, int *odata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } odata[idx] = (bdata[idx] ? tdata[idx] : fdata[idx]); } __global__ void refill(const int n, int * d, int * idata, int *odata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } odata[d[idx]] = idata[idx]; } //template <typename var_t, const int blocksize> void sort(int n, int * odata, int * idata) { int size; int *dev_idata, *dev_odata; int *dev_b, *dev_e, *dev_f, *dev_t, *dev_d; size = n * sizeof(int); hipMalloc((void**)&dev_idata, size); hipMalloc((void**)&dev_odata, size); hipMalloc((void**)&dev_b, size); hipMalloc((void**)&dev_e, size); hipMalloc((void**)&dev_f, size); hipMalloc((void**)&dev_t, size); hipMalloc((void**)&dev_d, size); hipMemcpy(dev_idata, idata, size, hipMemcpyHostToDevice); dim3 blocksPerGrid((n + blocksize - 1) / blocksize); dim3 threadsPerBlock(blocksize); int max = findMax<int>(idata, n); int ndigit = ilog2ceil(max); timer().startGpuTimer(); for (int i = 0; i < ndigit; i++) { getB << <blocksPerGrid, threadsPerBlock >> >(n, i, dev_idata, dev_b); getE << <blocksPerGrid, threadsPerBlock >> >(n, i, dev_b, dev_e); thrust::device_ptr<int> dev_thrust_e(dev_e); thrust::device_ptr<int> dev_thrust_f(dev_f); thrust::exclusive_scan(dev_thrust_e, dev_thrust_e + n, dev_thrust_f); int tf, le; hipMemcpy(&tf, dev_f + n - 1, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&le, dev_e + n - 1, sizeof(int), hipMemcpyDeviceToHost); tf += le; getT << <blocksPerGrid, threadsPerBlock >> >(n, tf, dev_f, dev_t); getD << <blocksPerGrid, threadsPerBlock >> >(n, dev_b, dev_t, dev_f, dev_d); refill << <blocksPerGrid, threadsPerBlock >> >(n, dev_d, dev_idata, dev_odata); std::swap(dev_idata, dev_odata); } timer().endGpuTimer(); hipMemcpy(odata, dev_idata, size, hipMemcpyDeviceToHost); hipFree(dev_idata); hipFree(dev_odata); hipFree(dev_e); hipFree(dev_f); hipFree(dev_b); hipFree(dev_t); hipFree(dev_d); } } }
7a0cdad3a9372af98ae2983c03bd9d8f1362b520.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "stream_compaction/common.h"
#include "stream_compaction/efficient.h"
#include "stream_compaction/radix.h"

typedef int var_t;
#define blocksize 128

namespace StreamCompaction {
    namespace Radix {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        template<typename T>
        T findMax(T* &arr, T n) {
            T max = 0;
            for (int i = 0; i < n; ++i) {
                if (arr[i] > max) {
                    max = arr[i];
                }
            }
            return max;
        }

        __global__ void getB(const int n, const int t, int *idata, int *odata) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            odata[idx] = ((idata[idx] >> t) & 1);
        }

        __global__ void getE(const int n, const int t, int *bdata, int *edata) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            edata[idx] = (bdata[idx] ^ 1);
        }

        __global__ void getTF(const int n, int *edata, int *fdata, int *odata) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            odata[idx] = edata[idx] + fdata[idx];
        }

        __global__ void getT(const int n, const int tf, int *fdata, int *odata) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            odata[idx] = idx - fdata[idx] + tf;
        }

        __global__ void getD(const int n, int *bdata, int * tdata, int * fdata, int *odata) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            odata[idx] = (bdata[idx] ? tdata[idx] : fdata[idx]);
        }

        __global__ void refill(const int n, int * d, int * idata, int *odata) {
            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
            if (idx >= n) {
                return;
            }
            odata[d[idx]] = idata[idx];
        }

        //template <typename var_t, const int blocksize>
        void sort(int n, int * odata, int * idata) {
            int size;
            int *dev_idata, *dev_odata;
            int *dev_b, *dev_e, *dev_f, *dev_t, *dev_d;

            size = n * sizeof(int);
            cudaMalloc((void**)&dev_idata, size);
            cudaMalloc((void**)&dev_odata, size);
            cudaMalloc((void**)&dev_b, size);
            cudaMalloc((void**)&dev_e, size);
            cudaMalloc((void**)&dev_f, size);
            cudaMalloc((void**)&dev_t, size);
            cudaMalloc((void**)&dev_d, size);
            cudaMemcpy(dev_idata, idata, size, cudaMemcpyHostToDevice);

            dim3 blocksPerGrid((n + blocksize - 1) / blocksize);
            dim3 threadsPerBlock(blocksize);

            int max = findMax<int>(idata, n);
            int ndigit = ilog2ceil(max);

            timer().startGpuTimer();
            for (int i = 0; i < ndigit; i++) {
                getB << <blocksPerGrid, threadsPerBlock >> >(n, i, dev_idata, dev_b);
                getE << <blocksPerGrid, threadsPerBlock >> >(n, i, dev_b, dev_e);

                thrust::device_ptr<int> dev_thrust_e(dev_e);
                thrust::device_ptr<int> dev_thrust_f(dev_f);
                thrust::exclusive_scan(dev_thrust_e, dev_thrust_e + n, dev_thrust_f);

                int tf, le;
                cudaMemcpy(&tf, dev_f + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
                cudaMemcpy(&le, dev_e + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
                tf += le;

                getT << <blocksPerGrid, threadsPerBlock >> >(n, tf, dev_f, dev_t);
                getD << <blocksPerGrid, threadsPerBlock >> >(n, dev_b, dev_t, dev_f, dev_d);
                refill << <blocksPerGrid, threadsPerBlock >> >(n, dev_d, dev_idata, dev_odata);
                std::swap(dev_idata, dev_odata);
            }
            timer().endGpuTimer();

            cudaMemcpy(odata, dev_idata, size, cudaMemcpyDeviceToHost);
            cudaFree(dev_idata);
            cudaFree(dev_odata);
            cudaFree(dev_e);
            cudaFree(dev_f);
            cudaFree(dev_b);
            cudaFree(dev_t);
            cudaFree(dev_d);
        }
    }
}
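The hip/cu pair above implements LSD radix sort as a sequence of stable split passes: getB extracts the current bit, getE flags the zero-bit elements, a thrust::exclusive_scan of those flags gives each zero-element its destination, tf (the scan total plus the last flag) counts the zero-bit elements, and getT/getD compute the scatter address i - f[i] + tf for the one-bit elements before refill permutes the array. The CPU reference below is only an illustrative sketch with my own names, not code from the file, and it makes the address arithmetic explicit for a single bit.

#include <cstdio>
#include <vector>

// one stable split pass of LSD radix sort on bit `bit` (CPU reference)
static std::vector<int> splitByBit( const std::vector<int> &in, int bit )
{
    const int n = (int)in.size();
    std::vector<int> e(n), f(n), out(n);

    for (int i = 0; i < n; i++)                     // e: 1 where the bit is 0
        e[i] = ((in[i] >> bit) & 1) ^ 1;

    f[0] = 0;                                       // f: exclusive scan of e
    for (int i = 1; i < n; i++) f[i] = f[i-1] + e[i-1];

    const int totalFalses = f[n-1] + e[n-1];        // elements whose bit is 0

    for (int i = 0; i < n; i++) {
        int d = e[i] ? f[i]                         // bit 0 -> front, in order
                     : i - f[i] + totalFalses;      // bit 1 -> back, in order
        out[d] = in[i];
    }
    return out;
}

int main()
{
    std::vector<int> a = { 4, 7, 2, 6, 3, 5, 1, 0 };
    for (int bit = 0; bit < 3; bit++) a = splitByBit( a, bit );
    for (int v : a) printf( "%d ", v );             // prints 0 1 2 3 4 5 6 7
    printf( "\n" );
    return 0;
}

Because both branches preserve the original order within their group, repeating the split from the least to the most significant bit yields a correct sort, which is why the GPU loop above can simply ping-pong dev_idata and dev_odata once per bit.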
ad1794b1818f71fb1ef55f221c52321d5abf4569.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Copyright.h" #include "Macro.h" #include "CUPOT.h" #ifdef GAMER_DEBUG #include <stdio.h> #endif #if ( defined GRAVITY && defined GPU && POT_SCHEME == MG ) #define POT_NXT_F ( PATCH_SIZE+2*POT_GHOST_SIZE ) #define POT_NTHREAD ( POT_BLOCK_SIZE_X ) #define POT_USELESS ( POT_GHOST_SIZE%2 ) #if ( POT_NXT_F == 18 ) // for POT_NXT_F == 18, we reuse the same shared memory array due to the lack of shared memory # define REUSE_SHARED # define MAX_NLV 4U # define NBOTTOM_SMOOTH 1U # define NGRID_LV0 18U # define NGRID_LV1 9U # define NGRID_LV2 5U # define NGRID_LV3 3U #elif ( POT_NXT_F == 16 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 7U # define NGRID_LV0 16U # define NGRID_LV1 8U # define NGRID_LV2 4U #elif ( POT_NXT_F == 14 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 7U # define NGRID_LV0 14U # define NGRID_LV1 7U # define NGRID_LV2 4U #elif ( POT_NXT_F == 12 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 1U # define NGRID_LV0 12U # define NGRID_LV1 6U # define NGRID_LV2 3U #elif ( POT_NXT_F == 10 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 1U # define NGRID_LV0 10U # define NGRID_LV1 5U # define NGRID_LV2 3U #else #error ERROR : not supported POT_NXT_F #endif #if ( MAX_NLV != 3 && MAX_NLV != 4 ) #error ERROR : MAX_NLV != 3 or 4 #endif // variables reside in constant memory #include "CUPOT_PoissonSolver_SetConstMem.cu" // prototype static __device__ void LoadRho( const real *g_Rho, real *s_Rho, const real Poi_Coeff, const uint g_Idx0 ); static __device__ void Smoothing( real *Sol, const real *RHS, const real dh, const uint NGrid, const uint Idx0 ); static __device__ void ComputeDefect( const real *Sol, const real *RHS, real *Def, const real dh, const uint NGrid, const uint Idx0 ); static __device__ void Restrict( const real *FData, real *CData, const uint NGrid_F, const uint NGrid_C, const uint Idx0 ); static __device__ void Prolongate_and_Correct( const real *CData, real *FData, const uint NGrid_C, const uint NGrid_F, const uint FIdx0 ); static __device__ void EstimateError( const real *Sol, const real *RHS, const real dh, real *s_Error, real *s_SolSum, const uint tid ); //------------------------------------------------------------------------------------------------------- // Function : CUPOT_PoissonSolver_MG // Description : GPU Poisson solver using the multigrid scheme // // Note : a. Work for POT_GHOST_SIZE = 1, 2, 3, 4, 5 <--> POT_NXT_F = 10, 12, 14, 16, 18 // b. Prefix "g" for pointers pointing to the "Global" memory space // Prefix "s" for pointers pointing to the "Shared" memory space // c. Reference : Numerical Recipes, Chapter 20.6 // // Parameter : g_Rho_Array : Global memory array storing the input density // g_Pot_Array_In : Global memory array storing the input "coarse-grid" potential for ] // interpolation // g_Pot_Array_Out : Global memory array to store the output potential // dh_Min : Grid size of the input data // Max_Iter : Maximum number of iterations for multigrid // NPre_Smooth : Number of pre-smoothing steps for multigrid // NPost_Smooth : Number of post-smoothing steps for multigrid // Tolerated_Error : Maximum tolerated error for multigrid // Poi_Coeff : Coefficient in front of the RHS in the Poisson eq. 
// IntScheme : Interpolation scheme for potential // --> currently supported schemes include // INT_CQUAD : conservative quadratic interpolation // INT_QUAD : quadratic interpolation //--------------------------------------------------------------------------------------------------- __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ) { const uint bid = blockIdx.x; const uint tid = threadIdx.x; const uint dy = POT_NXT_F; const uint dz = POT_NXT_F*POT_NXT_F; int Iter; uint t, s_Idx; real dh[MAX_NLV]; // set the grid sizes at all different levels # if ( MAX_NLV == 4 ) const uint NGrid[MAX_NLV] = { NGRID_LV0, NGRID_LV1, NGRID_LV2, NGRID_LV3 }; # elif ( MAX_NLV == 3 ) const uint NGrid[MAX_NLV] = { NGRID_LV0, NGRID_LV1, NGRID_LV2 }; # endif dh[0] = dh_Min; for (uint Lv=1U; Lv<MAX_NLV; Lv++) dh[Lv] = dh_Min * ( NGrid[0] - 1U ) / ( NGrid[Lv] - 1U ); // allocate shared memory __shared__ real s_Sol_Lv0[ NGRID_LV0*NGRID_LV0*NGRID_LV0 ]; # ifndef FLOAT8 __shared__ real s_RHS_Lv0[ NGRID_LV0*NGRID_LV0*NGRID_LV0 ]; __shared__ real s_SolSum[POT_NTHREAD]; __shared__ real s_Error [POT_NTHREAD]; # else // shared memory is too small for double precision --> use global memory instead __shared__ real *s_RHS_Lv0; __shared__ real *s_SolSum; __shared__ real *s_Error; if ( tid == 0 ) { s_RHS_Lv0 = (real*)malloc( sizeof(real*)*NGRID_LV0*NGRID_LV0*NGRID_LV0 ); s_SolSum = (real*)malloc( sizeof(real*)*POT_NTHREAD ); s_Error = (real*)malloc( sizeof(real*)*POT_NTHREAD ); } __syncthreads(); # ifdef GAMER_DEBUG if ( tid == 0 ) { if ( s_RHS_Lv0 == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_RHS_Lv0", bid, __FUNCTION__ ); return; } if ( s_SolSum == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_SolSum", bid, __FUNCTION__ ); return; } if ( s_Error == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_Error", bid, __FUNCTION__ ); return; } } # endif # endif // #ifndef FLOAT8 ... else ... real *s_Def_Lv0 = s_RHS_Lv0; // s_Def_Lv0, s_CPot and RHS_Lv0 share the same shared-memory array real *s_CPot = s_RHS_Lv0; # ifdef REUSE_SHARED // reuse the shared-memory arrays due to the lack of shared memory real *s_Sol_Lv1 = s_RHS_Lv0; real *s_Sol_Lv2 = s_Sol_Lv1 + NGRID_LV1*NGRID_LV1*NGRID_LV1; real *s_Sol_Lv3 = s_Sol_Lv2 + NGRID_LV2*NGRID_LV2*NGRID_LV2; real *s_RHS_Lv2 = s_Sol_Lv3 + NGRID_LV3*NGRID_LV3*NGRID_LV3; real *s_RHS_Lv3 = s_RHS_Lv2 + NGRID_LV2*NGRID_LV2*NGRID_LV2; real *s_Def_Lv1 = s_RHS_Lv3 + NGRID_LV3*NGRID_LV3*NGRID_LV3; real *s_Def_Lv2 = s_Def_Lv1 + NGRID_LV1*NGRID_LV1*NGRID_LV1; real *s_Def_Lv3 = s_Def_Lv2 + NGRID_LV2*NGRID_LV2*NGRID_LV2; // use global memory for s_RHS_Lv1 because s_RHS_Lv1 and s_Def_Lv0 cannot share the same memory space __shared__ real *s_RHS_Lv1; if ( tid == 0 ) s_RHS_Lv1 = (real*) malloc( sizeof(real)*NGRID_LV1*NGRID_LV1*NGRID_LV1 ); __syncthreads(); # ifdef GAMER_DEBUG if ( tid == 0 && s_RHS_Lv1 == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_RHS_Lv1", bid, __FUNCTION__ ); return; } # endif # else // #ifdef REUSE_SHARED ... else ... 
__shared__ real s_Sol_Lv1[ NGRID_LV1*NGRID_LV1*NGRID_LV1 ]; __shared__ real s_Sol_Lv2[ NGRID_LV2*NGRID_LV2*NGRID_LV2 ]; __shared__ real s_RHS_Lv1[ NGRID_LV1*NGRID_LV1*NGRID_LV1 ]; __shared__ real s_RHS_Lv2[ NGRID_LV2*NGRID_LV2*NGRID_LV2 ]; __shared__ real s_Def_Lv1[ NGRID_LV1*NGRID_LV1*NGRID_LV1 ]; __shared__ real s_Def_Lv2[ NGRID_LV2*NGRID_LV2*NGRID_LV2 ]; # if ( MAX_NLV == 4 ) __shared__ real s_Sol_Lv3[ NGRID_LV3*NGRID_LV3*NGRID_LV3 ]; __shared__ real s_RHS_Lv3[ NGRID_LV3*NGRID_LV3*NGRID_LV3 ]; __shared__ real s_Def_Lv3[ NGRID_LV3*NGRID_LV3*NGRID_LV3 ]; # endif # endif // #ifdef REUSE_SHARED ... else ... # if ( MAX_NLV == 4 ) real *s_Sol[MAX_NLV] = { s_Sol_Lv0, s_Sol_Lv1, s_Sol_Lv2, s_Sol_Lv3 }; real *s_RHS[MAX_NLV] = { s_RHS_Lv0, s_RHS_Lv1, s_RHS_Lv2, s_RHS_Lv3 }; real *s_Def[MAX_NLV] = { s_Def_Lv0, s_Def_Lv1, s_Def_Lv2, s_Def_Lv3 }; # elif ( MAX_NLV == 3 ) real *s_Sol[MAX_NLV] = { s_Sol_Lv0, s_Sol_Lv1, s_Sol_Lv2 }; real *s_RHS[MAX_NLV] = { s_RHS_Lv0, s_RHS_Lv1, s_RHS_Lv2 }; real *s_Def[MAX_NLV] = { s_Def_Lv0, s_Def_Lv1, s_Def_Lv2 }; # endif s_Error[0] = __FLT_MAX__; // a. load the coarse-grid potential into shared memory // ----------------------------------------------------------------------------------------------------------- t = tid; while ( t < POT_NXT*POT_NXT*POT_NXT ) { s_CPot[t] = g_Pot_Array_In[bid][t]; t += POT_NTHREAD; } __syncthreads(); // b. evaluate the "fine-grid" potential by interpolation (as the initial guess and the B.C.) // ----------------------------------------------------------------------------------------------------------- const int N_CSlice = POT_NTHREAD / ( (POT_NXT-2)*(POT_NXT-2) ); if ( tid < N_CSlice*(POT_NXT-2)*(POT_NXT-2) ) { const real Const_8 = 1.0/8.0; const real Const_64 = 1.0/64.0; const real Const_512 = 1.0/512.0; const int Cdx = 1; const int Cdy = POT_NXT; const int Cdz = POT_NXT*POT_NXT; const int CIDx = 1 + tid % ( POT_NXT-2 ); const int CIDy = 1 + ( tid % ( (POT_NXT-2)*(POT_NXT-2) ) ) / ( POT_NXT-2 ); const int CIDz = 1 + tid / ( (POT_NXT-2)*(POT_NXT-2) ); int CID = __mul24( CIDz, Cdz ) + __mul24( CIDy, Cdy ) + __mul24( CIDx, Cdx ); const int Fdx = 1; const int Fdy = POT_NXT_F; const int Fdz = POT_NXT_F*POT_NXT_F; const int FIDx = ( (CIDx-1)<<1 ) - POT_USELESS; const int FIDy = ( (CIDy-1)<<1 ) - POT_USELESS; int FIDz = ( (CIDz-1)<<1 ) - POT_USELESS; int FID = __mul24( FIDz, Fdz ) + __mul24( FIDy, Fdy ) + __mul24( FIDx, Fdx ); real TempFPot1, TempFPot2, TempFPot3, TempFPot4, TempFPot5, TempFPot6, TempFPot7, TempFPot8; real Slope_00, Slope_01, Slope_02, Slope_03, Slope_04, Slope_05, Slope_06, Slope_07; real Slope_08, Slope_09, Slope_10, Slope_11, Slope_12; int Idx, Idy, Idz, ii, jj, kk; for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice) { switch ( IntScheme ) { /* case INT_CENTRAL : { Slope_00 = (real)0.125 * ( s_CPot[CID+Cdx] - s_CPot[CID-Cdx] ); Slope_01 = (real)0.125 * ( s_CPot[CID+Cdy] - s_CPot[CID-Cdy] ); Slope_02 = (real)0.125 * ( s_CPot[CID+Cdz] - s_CPot[CID-Cdz] ); TempFPot1 = s_CPot[CID] - Slope_00 - Slope_01 - Slope_02; TempFPot2 = s_CPot[CID] + Slope_00 - Slope_01 - Slope_02; TempFPot3 = s_CPot[CID] - Slope_00 + Slope_01 - Slope_02; TempFPot4 = s_CPot[CID] + Slope_00 + Slope_01 - Slope_02; TempFPot5 = s_CPot[CID] - Slope_00 - Slope_01 + Slope_02; TempFPot6 = s_CPot[CID] + Slope_00 - Slope_01 + Slope_02; TempFPot7 = s_CPot[CID] - Slope_00 + Slope_01 + Slope_02; TempFPot8 = s_CPot[CID] + Slope_00 + Slope_01 + Slope_02; } break; // INT_CENTRAL */ case INT_CQUAD : { Slope_00 = Const_8 * ( s_CPot[CID+Cdx ] - s_CPot[CID-Cdx ] ); Slope_01 = 
Const_8 * ( s_CPot[CID +Cdy ] - s_CPot[CID -Cdy ] ); Slope_02 = Const_8 * ( s_CPot[CID +Cdz] - s_CPot[CID -Cdz] ); Slope_03 = Const_64 * ( s_CPot[CID+Cdx -Cdz] - s_CPot[CID-Cdx -Cdz] ); Slope_04 = Const_64 * ( s_CPot[CID +Cdy-Cdz] - s_CPot[CID -Cdy-Cdz] ); Slope_05 = Const_64 * ( s_CPot[CID+Cdx-Cdy ] - s_CPot[CID-Cdx-Cdy ] ); Slope_06 = Const_64 * ( s_CPot[CID+Cdx+Cdy ] - s_CPot[CID-Cdx+Cdy ] ); Slope_07 = Const_64 * ( s_CPot[CID+Cdx +Cdz] - s_CPot[CID-Cdx +Cdz] ); Slope_08 = Const_64 * ( s_CPot[CID +Cdy+Cdz] - s_CPot[CID -Cdy+Cdz] ); Slope_09 = Const_512 * ( s_CPot[CID+Cdx-Cdy-Cdz] - s_CPot[CID-Cdx-Cdy-Cdz] ); Slope_10 = Const_512 * ( s_CPot[CID+Cdx+Cdy-Cdz] - s_CPot[CID-Cdx+Cdy-Cdz] ); Slope_11 = Const_512 * ( s_CPot[CID+Cdx-Cdy+Cdz] - s_CPot[CID-Cdx-Cdy+Cdz] ); Slope_12 = Const_512 * ( s_CPot[CID+Cdx+Cdy+Cdz] - s_CPot[CID-Cdx+Cdy+Cdz] ); TempFPot1 = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06 + Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot2 = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06 - Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; TempFPot3 = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06 + Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; TempFPot4 = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06 - Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot5 = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06 - Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; TempFPot6 = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06 + Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot7 = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06 - Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot8 = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06 + Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; } break; // INT_CQUAD case INT_QUAD : { TempFPot1 = TempFPot2 = TempFPot3 = TempFPot4 = (real)0.0; TempFPot5 = TempFPot6 = TempFPot7 = TempFPot8 = (real)0.0; for (int dk=-1; dk<=1; dk++) { Idz = dk+1; kk = __mul24( dk, Cdz ); for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy ); for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx ); TempFPot1 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mm[Idx]; TempFPot2 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mp[Idx]; TempFPot3 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mm[Idx]; TempFPot4 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mp[Idx]; TempFPot5 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mm[Idx]; TempFPot6 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mp[Idx]; TempFPot7 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mm[Idx]; TempFPot8 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mp[Idx]; }}} } break; // INT_QUAD } // switch ( IntScheme ) // save data to the shared-memory array. // Currently this part is highly diverge. 
However, since the interpolation takes much less time than the // Poisson solver does, we have not yet tried to optimize this part if ( FIDz >= 0 ) { if ( FIDx >= 0 && FIDy >= 0 ) s_Sol_Lv0[FID ] = TempFPot1; if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_Sol_Lv0[FID+Fdx ] = TempFPot2; if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID +Fdy ] = TempFPot3; if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID+Fdx+Fdy ] = TempFPot4; } if ( FIDz <= POT_NXT_F-2 ) { if ( FIDx >= 0 && FIDy >= 0 ) s_Sol_Lv0[FID +Fdz] = TempFPot5; if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_Sol_Lv0[FID+Fdx +Fdz] = TempFPot6; if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID +Fdy+Fdz] = TempFPot7; if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID+Fdx+Fdy+Fdz] = TempFPot8; } CID += __mul24( N_CSlice, Cdz ); FID += __mul24( 2*N_CSlice, Fdz ); FIDz += 2*N_CSlice; } // for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice) } // if ( tid < N_CSlice*(POT_NXT-2)*(POT_NXT-2) ) __syncthreads(); // c1. initialize s_Def_Lv{0-3} as zero (just to make sure that the boundary cells of s_Def_Lv{0-3} are zero) // (note that s_Def_Lv0 and s_CPot share the same array) // ----------------------------------------------------------------------------------------------------------- # ifndef REUSE_SHARED t = tid; while ( t < NGRID_LV0*NGRID_LV0*NGRID_LV0 ) { s_Def_Lv0[t] = (real)0.0; t += POT_NTHREAD; } t = tid; while ( t < NGRID_LV1*NGRID_LV1*NGRID_LV1 ) { s_Def_Lv1[t] = (real)0.0; t += POT_NTHREAD; } t = tid; while ( t < NGRID_LV2*NGRID_LV2*NGRID_LV2 ) { s_Def_Lv2[t] = (real)0.0; t += POT_NTHREAD; } # if ( MAX_NLV == 4 ) t = tid; while ( t < NGRID_LV3*NGRID_LV3*NGRID_LV3 ) { s_Def_Lv3[t] = (real)0.0; t += POT_NTHREAD; } # endif __syncthreads(); # endif // c2 load density into shared memory LoadRho( g_Rho_Array[bid], s_RHS_Lv0, Poi_Coeff, tid ); // d. 
use the MG scheme to evaluate potential // ----------------------------------------------------------------------------------------------------------- Iter = 0; while ( Iter < Max_Iter && s_Error[0] > Tolerated_Error ) { // V-cycle : finer --> coarser grids for (int Lv=0; Lv<MAX_NLV-1; Lv++) { // pre-smoothing (apply relaxation to compute solution/correction) for (int PreStep=0; PreStep<NPre_Smooth; PreStep++) Smoothing( s_Sol[Lv], s_RHS[Lv], dh[Lv], NGrid[Lv], tid ); // compute defect ComputeDefect( s_Sol[Lv], s_RHS[Lv], s_Def[Lv], dh[Lv], NGrid[Lv], tid ); // restrict defect (use as the RHS at the next level) Restrict( s_Def[Lv], s_RHS[Lv+1], NGrid[Lv], NGrid[Lv+1], tid ); // initialize the correction at the next level to zero t = tid; while ( t < NGrid[Lv+1]*NGrid[Lv+1]*NGrid[Lv+1] ) { s_Sol[Lv+1][t] = (real)0.0; t += POT_NTHREAD; } __syncthreads(); } // calculate the correction at the bottom level for (int BottomStep=0; BottomStep<NBOTTOM_SMOOTH; BottomStep++) Smoothing( s_Sol[MAX_NLV-1], s_RHS[MAX_NLV-1], dh[MAX_NLV-1], NGrid[MAX_NLV-1], tid ); // V-cycle : coarser --> finer grids for (int Lv=MAX_NLV-2; Lv>=0; Lv--) { // prolongate correction (from Lv+1 to Lv) and correct solution/correction at Lv Prolongate_and_Correct( s_Sol[Lv+1], s_Sol[Lv], NGrid[Lv+1], NGrid[Lv], tid ); // load s_RHS_Lv0 from global memory since s_RHS_Lv0 and s_Def_Lv0 share the same shared-memory array if ( Lv == 0 ) LoadRho( g_Rho_Array[bid], s_RHS_Lv0, Poi_Coeff, tid ); // post-smoothing (apply relaxation to compute solution/correction again) for (int PostStep=0; PostStep<NPost_Smooth; PostStep++) Smoothing( s_Sol[Lv], s_RHS[Lv], dh[Lv], NGrid[Lv], tid ); } // estimate error EstimateError( s_Sol[0], s_RHS[0], dh[0], s_Error, s_SolSum, tid ); Iter ++; // if ( tid == 0 && bid == 0 ) printf( "Patch %3d, Iter %3d: Error = %13.7e\n", bid, Iter, s_Error[0] ); } // while ( Iter < Max_Iter && Error > Tolerated_Error ) // e. store potential back to global memory // ----------------------------------------------------------------------------------------------------------- t = tid; while ( t < GRA_NXT*GRA_NXT*GRA_NXT ) { s_Idx = __umul24( t/(GRA_NXT*GRA_NXT) + POT_GHOST_SIZE - GRA_GHOST_SIZE, dz ) + __umul24( t%(GRA_NXT*GRA_NXT)/GRA_NXT + POT_GHOST_SIZE - GRA_GHOST_SIZE, dy ) + t%(GRA_NXT ) + POT_GHOST_SIZE - GRA_GHOST_SIZE; g_Pot_Array_Out[bid][t] = s_Sol_Lv0[s_Idx]; t += POT_NTHREAD; } // free memory # ifdef REUSE_SHARED if ( tid == 0 ) free( s_RHS_Lv1 ); # endif # ifdef FLOAT8 if ( tid == 0 ) { free( s_RHS_Lv0 ); free( s_SolSum ); free( s_Error ); } # endif } // FUNCTION : CUPOT_PoissonSolver_MG //------------------------------------------------------------------------------------------------------- // Function : Smoothing // Description : Use Gauss-Seidel method for smoothing // // Note : 1. B.C. should be stored in the input array "Sol" // 2. 
"__syncthreads" is invoked in the end of this function // // Parameter : Sol : 1D array to store the output solution to the Poisson equation // RHS : 1D array storing the RHS of the Poisson equation // dh : Grid size // NGrid : Number of cells in each spatial direction for Sol and RHS // Idx0 : Starting cell index //------------------------------------------------------------------------------------------------------- __device__ void Smoothing( real *Sol, const real *RHS, const real dh, const uint NGrid, const uint Idx0 ) { const real dh2 = dh*dh; const real One_Six = (real)1.0/(real)6.0; const uint NGrid_m2 = NGrid - 2U; const uint NGrid2 = __umul24( NGrid, NGrid ); const uint NGrid_2 = (NGrid_m2+1)>>1U; // if NGrid is an odd number, one cell is padded const uint NGrid2_2 = __umul24( NGrid_m2, NGrid_2 ); const uint NInner_2 = __umul24( NGrid_m2, NGrid2_2 ); const uint di = 1U; const uint dj = NGrid; const uint dk = NGrid2; uint i, j, k, ip, jp, kp, im, jm, km, ijk, pass_flip; uint Idx; // odd-even ordering for (uint pass=0; pass<2U; pass++) { Idx = Idx0; pass_flip = pass & 1U; while ( Idx < NInner_2 ) { i = Idx%NGrid_2<<1U; j = Idx%NGrid2_2/NGrid_2; k = Idx/NGrid2_2; i += 1U + ( (j&1U)^(k&1U)^pass_flip ); j++; k++; ijk = __umul24( k, NGrid2 ) + __umul24( j, NGrid ) + i; ip = ijk + di; jp = ijk + dj; kp = ijk + dk; im = ijk - di; jm = ijk - dj; km = ijk - dk; // update solution //###OPTIMIZATION: try to optimize out this conditional operation (for the case that NGrid is odd) # if ( ( POT_NXT_F & (POT_NXT_F-1) ) != 0 ) if ( i <= NGrid_m2 ) # endif Sol[ijk] = One_Six*( Sol[kp] + Sol[km] + Sol[jp] + Sol[jm] + Sol[ip] + Sol[im] - dh2*RHS[ijk] ); Idx += POT_NTHREAD; } // while ( Idx < NInner_2 ) __syncthreads(); } // for (int pass=0; pass<2; pass++) } // FUNCTION : Smoothing //------------------------------------------------------------------------------------------------------- // Function : ComputeDefect // Description : Compute negative defect defined as "-(Laplacian(Sol)-RHS)" // // Note : 1. B.C. should be stored in the input array "Sol" // 2. It is assumed that the boundary values of Def have already been initialized as zero // (unless REUSE_SHARED is defined) // 3. 
"__syncthreads" is invoked in the end of this function // // Parameter : Sol : 1D array storing the input solution to the Poisson equation // RHS : 1D array storing the RHS of the Poisson equation // Def : 1D array to store the output defect // dh : Grid size // NGrid : Number of cells in each spatial direction for Sol and RHS // Idx0 : Starting cell index //------------------------------------------------------------------------------------------------------- __device__ void ComputeDefect( const real *Sol, const real *RHS, real *Def, const real dh, const uint NGrid, const uint Idx0 ) { const real _dh2 = (real)-1.0/(dh*dh); const uint NGrid2 = __umul24( NGrid, NGrid ); const uint NGrid_m2 = NGrid - 2U; const uint NGrid_m2_2 = __umul24( NGrid_m2, NGrid_m2 ); const uint NGrid_m2_3 = __umul24( NGrid_m2, NGrid_m2_2 ); const uint di = 1U; const uint dj = NGrid; const uint dk = NGrid2; uint i, j, k, ip, jp, kp, im, jm, km, ijk; uint Idx = Idx0; while ( Idx < NGrid_m2_3 ) { i = 1U + Idx%NGrid_m2; j = 1U + Idx%NGrid_m2_2/NGrid_m2; k = 1U + Idx/NGrid_m2_2; ijk = __umul24( k, NGrid2 ) + __umul24( j, NGrid ) + i; ip = ijk + di; jp = ijk + dj; kp = ijk + dk; im = ijk - di; jm = ijk - dj; km = ijk - dk; Def[ijk] = _dh2*( Sol[kp] + Sol[km] + Sol[jp] + Sol[jm] + Sol[ip] + Sol[im] - (real)6.0*Sol[ijk] ) + RHS[ijk]; Idx += POT_NTHREAD; } // while ( Idx < NGrid_m2_3 ) // set the boundary values as zeros # ifdef REUSE_SHARED Idx = Idx0; while ( Idx < NGrid2 ) { i = Idx%NGrid; j = Idx/NGrid; Def[ __umul24( 0U, NGrid2 ) + __umul24( j, NGrid ) + i ] = (real)0.0; Def[ __umul24( NGrid-1U, NGrid2 ) + __umul24( j, NGrid ) + i ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( i, NGrid ) + 0U ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( i, NGrid ) + NGrid-1U ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( 0U, NGrid ) + i ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( NGrid-1U, NGrid ) + i ] = (real)0.0; Idx += POT_NTHREAD; } # endif __syncthreads(); } // FUNCTION : ComputeDefect //------------------------------------------------------------------------------------------------------- // Function : LoadRho // Description : Load density field from the global memory to the shared memory // // Note : 1. "__syncthreads" is invoked in the end of this function // 2. Loaded data will be multiplied by "Poi_Coeff" // 3. The size of the shared-memory array "s_Rho" is "RHO_NXT+2" (padded with one zero on each // side in each direction) // // Parameter : g_Rho : Global memory array storing the input density // s_Rho : Shared memory array to store the density // Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a) // g_Idx0 : Starting read index from the global memory //------------------------------------------------------------------------------------------------------- __device__ void LoadRho( const real *g_Rho, real *s_Rho, const real Poi_Coeff, const uint g_Idx0 ) { uint s_Idx, g_Idx = g_Idx0; uint3 g_Idx3D; while ( g_Idx < RHO_NXT*RHO_NXT*RHO_NXT ) { g_Idx3D.x = g_Idx%RHO_NXT; g_Idx3D.y = g_Idx%(RHO_NXT*RHO_NXT)/RHO_NXT; g_Idx3D.z = g_Idx/(RHO_NXT*RHO_NXT); s_Idx = __umul24( __umul24( g_Idx3D.z+1U, NGRID_LV0 ) + g_Idx3D.y+1U, NGRID_LV0 ) + g_Idx3D.x+1U; s_Rho[s_Idx] = Poi_Coeff*g_Rho[g_Idx]; g_Idx += POT_NTHREAD; } __syncthreads(); } // LoadRho //------------------------------------------------------------------------------------------------------- // Function : Restrict // Description : Restrict the input fine-grid data to get the coarse-grid data // // Note : 1. 
We assume that the input arrays follow the "finite-difference" fashion, in which the data // are defined in the cell intersections instead of cell averages // --> N^3 cells define a 3D grid with the size equal to (N-1)^3 // 2. Fine-grid and coarse-grid data at boundaries are assumed to be zero (because defect at // boundaries are always zero) // 3. "__syncthreads" is invoked in the end of this function // // Parameter : FData : 1D array storing the input fine-grid data // CData : 1D array to store the output coarse-grid data // NGrid_F : Number of fine-grid cells in each spatial direction // NGrid_C : Number of coarse-grid cells in each spatial direction // CIdx0 : Starting coarse-grid index //------------------------------------------------------------------------------------------------------- __device__ void Restrict( const real *FData, real *CData, const uint NGrid_F, const uint NGrid_C, const uint CIdx0 ) { const real Ratio = real(NGrid_F-1) / real(NGrid_C-1); const uint NGrid_F2 = __umul24( NGrid_F, NGrid_F ); const uint NGrid_C2 = __umul24( NGrid_C, NGrid_C ); const uint NGrid_Cm2 = NGrid_C - 2U; const uint NGrid_Cm2_2 = __umul24( NGrid_Cm2, NGrid_Cm2 ); const uint NGrid_Cm2_3 = __umul24( NGrid_Cm2, NGrid_Cm2_2 ); const uint Fdi = 1U; const uint Fdj = NGrid_F; const uint Fdk = NGrid_F2; uint Ci, Cj, Ck, Fi, Fj, Fk, Cijk, Fijk; real x, y, z, Coeff_xm, Coeff_xc, Coeff_xp, Coeff_ym, Coeff_yc, Coeff_yp, Coeff_zm, Coeff_zc, Coeff_zp; uint CIdx = CIdx0; while ( CIdx < NGrid_Cm2_3 ) { Ci = 1U + CIdx%NGrid_Cm2; Cj = 1U + CIdx%NGrid_Cm2_2/NGrid_Cm2; Ck = 1U + CIdx/NGrid_Cm2_2; x = Ci*Ratio; y = Cj*Ratio; z = Ck*Ratio; Fi = uint( x + (real)0.5 ); Fj = uint( y + (real)0.5 ); Fk = uint( z + (real)0.5 ); Cijk = __umul24( Ck, NGrid_C2 ) + __umul24( Cj, NGrid_C ) + Ci; Fijk = __umul24( Fk, NGrid_F2 ) + __umul24( Fj, NGrid_F ) + Fi; Coeff_xm = (real)0.5 * ( Fi + (real)0.5 - x ); Coeff_ym = (real)0.5 * ( Fj + (real)0.5 - y ); Coeff_zm = (real)0.5 * ( Fk + (real)0.5 - z ); Coeff_xp = (real)0.5 - Coeff_xm; Coeff_yp = (real)0.5 - Coeff_ym; Coeff_zp = (real)0.5 - Coeff_zm; Coeff_xc = (real)0.5; Coeff_yc = (real)0.5; Coeff_zc = (real)0.5; // Coeff_xc = (real)1.0 - Coeff_xm - Coeff_xp; // Coeff_yc = (real)1.0 - Coeff_ym - Coeff_yp; // Coeff_zc = (real)1.0 - Coeff_zm - Coeff_zp; //###OPTIMIZATION : follow the same strategy adopted in "Int_Quadratic" CData[Cijk] = Coeff_zm * Coeff_ym * Coeff_xm * FData[ Fijk - Fdk - Fdj - Fdi ] + Coeff_zm * Coeff_ym * Coeff_xc * FData[ Fijk - Fdk - Fdj ] + Coeff_zm * Coeff_ym * Coeff_xp * FData[ Fijk - Fdk - Fdj + Fdi ] + Coeff_zm * Coeff_yc * Coeff_xm * FData[ Fijk - Fdk - Fdi ] + Coeff_zm * Coeff_yc * Coeff_xc * FData[ Fijk - Fdk ] + Coeff_zm * Coeff_yc * Coeff_xp * FData[ Fijk - Fdk + Fdi ] + Coeff_zm * Coeff_yp * Coeff_xm * FData[ Fijk - Fdk + Fdj - Fdi ] + Coeff_zm * Coeff_yp * Coeff_xc * FData[ Fijk - Fdk + Fdj ] + Coeff_zm * Coeff_yp * Coeff_xp * FData[ Fijk - Fdk + Fdj + Fdi ] + Coeff_zc * Coeff_ym * Coeff_xm * FData[ Fijk - Fdj - Fdi ] + Coeff_zc * Coeff_ym * Coeff_xc * FData[ Fijk - Fdj ] + Coeff_zc * Coeff_ym * Coeff_xp * FData[ Fijk - Fdj + Fdi ] + Coeff_zc * Coeff_yc * Coeff_xm * FData[ Fijk - Fdi ] + Coeff_zc * Coeff_yc * Coeff_xc * FData[ Fijk ] + Coeff_zc * Coeff_yc * Coeff_xp * FData[ Fijk + Fdi ] + Coeff_zc * Coeff_yp * Coeff_xm * FData[ Fijk + Fdj - Fdi ] + Coeff_zc * Coeff_yp * Coeff_xc * FData[ Fijk + Fdj ] + Coeff_zc * Coeff_yp * Coeff_xp * FData[ Fijk + Fdj + Fdi ] + Coeff_zp * Coeff_ym * Coeff_xm * FData[ Fijk + Fdk - Fdj - Fdi ] + Coeff_zp * 
Coeff_ym * Coeff_xc * FData[ Fijk + Fdk - Fdj ] + Coeff_zp * Coeff_ym * Coeff_xp * FData[ Fijk + Fdk - Fdj + Fdi ] + Coeff_zp * Coeff_yc * Coeff_xm * FData[ Fijk + Fdk - Fdi ] + Coeff_zp * Coeff_yc * Coeff_xc * FData[ Fijk + Fdk ] + Coeff_zp * Coeff_yc * Coeff_xp * FData[ Fijk + Fdk + Fdi ] + Coeff_zp * Coeff_yp * Coeff_xm * FData[ Fijk + Fdk + Fdj - Fdi ] + Coeff_zp * Coeff_yp * Coeff_xc * FData[ Fijk + Fdk + Fdj ] + Coeff_zp * Coeff_yp * Coeff_xp * FData[ Fijk + Fdk + Fdj + Fdi ]; // coefficient adopted in Enzo which seems to give faster convergence rate // CData[Cijk] *= (real)0.52*Ratio; CIdx += POT_NTHREAD; } // while ( CIdx < NGrid_Cm2_3 ) __syncthreads(); } // FUNCTION : Restrict //------------------------------------------------------------------------------------------------------- // Function : Prolongate_and_Correct // Description : Prolongate the input coarse-grid correction to correct the fine-grid solution/correction // // Note : 1. We assume that the input arrays follow the "finite-difference" fashion, in which the data // are defined in the cell intersections instead of cell averages // --> N^3 cells define a 3D grid with the size equal to (N-1)^3 // 2. Boundary data of FData_1D are not corrected (since solution/correction at boundaries // should be fixed // // Parameter : CData : 1D array storing the input coarse-grid data // FData : 1D array to store the output fine-grid data // NGrid_C : Number of coarse-grid cells in each spatial direction // NGrid_F : Number of fine-grid cells in each spatial direction // FIdx0 : Starting fine-grid index //------------------------------------------------------------------------------------------------------- __device__ void Prolongate_and_Correct( const real *CData, real *FData, const uint NGrid_C, const uint NGrid_F, const uint FIdx0 ) { const real Ratio = real(NGrid_C-1) / real(NGrid_F-1); const uint NGrid_C2 = __umul24( NGrid_C, NGrid_C ); const uint NGrid_F2 = __umul24( NGrid_F, NGrid_F ); const uint NGrid_Fm2 = NGrid_F - 2U; const uint NGrid_Fm2_2 = __umul24( NGrid_Fm2, NGrid_Fm2 ); const uint NGrid_Fm2_3 = __umul24( NGrid_Fm2, NGrid_Fm2_2 ); const uint Cdi = 1U; const uint Cdj = NGrid_C; const uint Cdk = NGrid_C2; uint Ci, Cj, Ck, Fi, Fj, Fk, Cijk, Fijk; real x, y, z, Coeff_xm, Coeff_xp, Coeff_ym, Coeff_yp, Coeff_zm, Coeff_zp; uint FIdx = FIdx0; while ( FIdx < NGrid_Fm2_3 ) { Fi = 1U + FIdx%NGrid_Fm2; Fj = 1U + FIdx%NGrid_Fm2_2/NGrid_Fm2; Fk = 1U + FIdx/NGrid_Fm2_2; x = Fi*Ratio; y = Fj*Ratio; z = Fk*Ratio; Ci = uint( x ); Cj = uint( y ); Ck = uint( z ); Cijk = __umul24( Ck, NGrid_C2 ) + __umul24( Cj, NGrid_C ) + Ci; Fijk = __umul24( Fk, NGrid_F2 ) + __umul24( Fj, NGrid_F ) + Fi; Coeff_xm = Ci + 1U - x; Coeff_ym = Cj + 1U - y; Coeff_zm = Ck + 1U - z; Coeff_xp = (real)1.0 - Coeff_xm; Coeff_yp = (real)1.0 - Coeff_ym; Coeff_zp = (real)1.0 - Coeff_zm; FData[Fijk] += Coeff_zm * Coeff_ym * Coeff_xm * CData[ Cijk ] + Coeff_zm * Coeff_ym * Coeff_xp * CData[ Cijk + Cdi ] + Coeff_zm * Coeff_yp * Coeff_xm * CData[ Cijk + Cdj ] + Coeff_zp * Coeff_ym * Coeff_xm * CData[ Cijk + Cdk ] + Coeff_zm * Coeff_yp * Coeff_xp * CData[ Cijk + Cdj + Cdi ] + Coeff_zp * Coeff_yp * Coeff_xm * CData[ Cijk + Cdk + Cdj ] + Coeff_zp * Coeff_ym * Coeff_xp * CData[ Cijk + Cdk + Cdi ] + Coeff_zp * Coeff_yp * Coeff_xp * CData[ Cijk + Cdk + Cdj + Cdi ]; FIdx += POT_NTHREAD; } // while ( FIdx < NGrid_Fm2_3 ) __syncthreads(); } // FUNCTION : Prolongate_and_Correct //------------------------------------------------------------------------------------------------------- 
// Function : EstimateError // Description : Estimate the L1 error // // Note : 1. "__syncthreads" is invoked in the end of this function // 2. Shared-memory arrays "s_Error" and "s_SolSum" are used for GPU reduction // // Parameter : Sol : 1D array storing the input solution to the Poisson equation // RHS : 1D array storing the RHS of the Poisson equation // dh : Grid size // s_Error : Shared-memory array to store the L1 error // s_SolSum : Shared-memroy array to store the sum of solution // tid : Thread index //------------------------------------------------------------------------------------------------------- __device__ void EstimateError( const real *Sol, const real *RHS, const real dh, real *s_Error, real *s_SolSum, const uint tid ) { # define NGRID_M2 ( NGRID_LV0 - 2U ) const real dh2 = dh*dh; const real _dh2 = (real)-1.0/dh2; const uint di = 1U; const uint dj = NGRID_LV0; const uint dk = NGRID_LV0*NGRID_LV0; const uint FloorPow2 = 1<<(31-__clz(POT_NTHREAD) ); // largest power-of-two value not greater than POT_NTHREAD const uint Remain = POT_NTHREAD - FloorPow2; uint i, j, k, ip, jp, kp, im, jm, km, ijk; uint Idx = tid; s_Error [tid] = (real)0.0; s_SolSum[tid] = (real)0.0; // 1. calculate defect while ( Idx < NGRID_M2*NGRID_M2*NGRID_M2 ) { i = 1U + Idx%NGRID_M2; j = 1U + Idx%(NGRID_M2*NGRID_M2)/NGRID_M2; k = 1U + Idx/(NGRID_M2*NGRID_M2); ijk = __umul24( k, NGRID_LV0*NGRID_LV0 ) + __umul24( j, NGRID_LV0 ) + i; ip = ijk + di; jp = ijk + dj; kp = ijk + dk; im = ijk - di; jm = ijk - dj; km = ijk - dk; s_Error [tid] += FABS( _dh2*( Sol[kp]+Sol[km]+Sol[jp]+Sol[jm]+Sol[ip]+Sol[im]-(real)6.0*Sol[ijk] ) + RHS[ijk] ); s_SolSum[tid] += FABS( Sol[ijk] ); Idx += POT_NTHREAD; } // while ( Idx < NGRID_M2*NGRID_M2*NGRID_M2 ) __syncthreads(); // 2. perform the reduction operation to get the L1 error // first sum up the elements larger than FloorPow2 to ensure that the number of remaining elements is power-of-two if ( tid < Remain ) { s_Error [tid] += s_Error [ tid + FloorPow2 ]; s_SolSum[tid] += s_SolSum[ tid + FloorPow2 ]; } // parallel reduction # if ( POT_NTHREAD >= 1024 ) # error : ERROR : POT_NTHREAD must < 1024 !! # endif # if ( POT_NTHREAD >= 512 ) if ( tid < 256 ) { s_Error [tid] += s_Error [ tid + 256 ]; s_SolSum[tid] += s_SolSum[ tid + 256 ]; } __syncthreads(); # endif # if ( POT_NTHREAD >= 256 ) if ( tid < 128 ) { s_Error [tid] += s_Error [ tid + 128 ]; s_SolSum[tid] += s_SolSum[ tid + 128 ]; } __syncthreads(); # endif # if ( POT_NTHREAD >= 128 ) if ( tid < 64 ) { s_Error [tid] += s_Error [ tid + 64 ]; s_SolSum[tid] += s_SolSum[ tid + 64 ]; } __syncthreads(); # endif // adopting warp-synchronous mechanism if ( tid < 32 ) { // declare volatile pointer to ensure that the operations are not reordered volatile real *s_vErr = s_Error; volatile real *s_vSol = s_SolSum; s_vErr[tid] += s_vErr[tid+32]; // here we have assumed that POT_NTHREAD >= 64 s_vErr[tid] += s_vErr[tid+16]; s_vErr[tid] += s_vErr[tid+ 8]; s_vErr[tid] += s_vErr[tid+ 4]; s_vErr[tid] += s_vErr[tid+ 2]; s_vErr[tid] += s_vErr[tid+ 1]; s_vSol[tid] += s_vSol[tid+32]; s_vSol[tid] += s_vSol[tid+16]; s_vSol[tid] += s_vSol[tid+ 8]; s_vSol[tid] += s_vSol[tid+ 4]; s_vSol[tid] += s_vSol[tid+ 2]; s_vSol[tid] += s_vSol[tid+ 1]; s_vErr[tid] = dh2*s_vErr[tid]/s_vSol[tid]; } __syncthreads(); # undef NGRID_M2 } // FUNCTION : EstimateError #endif // #if ( defined GRAVITY && defined GPU && POT_SCHEME == MG )
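The Smoothing and ComputeDefect device functions above are the two workhorses of the V-cycle: red-black Gauss-Seidel relaxation of the 7-point Laplacian (the two colors are swept in separate passes so each pass has no intra-pass dependences), and the residual -(Lap(Sol)-RHS) that gets restricted to the next coarser grid. The one-dimensional CPU analogue below is offered purely as an illustrative sketch with hypothetical names, not as code from this file; it shows the same two operations with Dirichlet boundary values held fixed.

#include <cmath>
#include <cstdio>
#include <vector>

// red-black Gauss-Seidel sweep for u'' = rhs on a uniform 1D grid;
// u[0] and u[n-1] act as Dirichlet B.C. and are never touched
static void smooth( std::vector<double> &u, const std::vector<double> &rhs, double dh )
{
    const int n = (int)u.size();
    for (int color = 0; color < 2; color++)            // two interleaved colors, swept in turn
        for (int i = 1 + color; i < n - 1; i += 2)
            u[i] = 0.5 * ( u[i-1] + u[i+1] - dh*dh*rhs[i] );
}

// negative defect  -(Lap(u) - rhs), kept zero on the boundary
static void defect( const std::vector<double> &u, const std::vector<double> &rhs,
                    std::vector<double> &def, double dh )
{
    const int n = (int)u.size();
    def.assign( n, 0.0 );
    for (int i = 1; i < n - 1; i++)
        def[i] = rhs[i] - ( u[i-1] - 2.0*u[i] + u[i+1] ) / (dh*dh);
}

int main()
{
    const int n = 17;
    const double dh = 1.0 / (n - 1);
    std::vector<double> u(n, 0.0), rhs(n, 1.0), d;

    for (int iter = 0; iter < 1000; iter++) smooth( u, rhs, dh );
    defect( u, rhs, d, dh );

    double err = 0.0;
    for (double v : d) err += std::fabs( v );
    printf( "L1 defect after 1000 sweeps: %.3e\n", err );  // tiny, but it took many sweeps
    return 0;
}

Plain relaxation damps only the high-frequency error components, which is exactly why the kernel above restricts the remaining smooth defect to coarser grids and recurses instead of iterating on the finest grid alone.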
ad1794b1818f71fb1ef55f221c52321d5abf4569.cu
#include "Copyright.h" #include "Macro.h" #include "CUPOT.h" #ifdef GAMER_DEBUG #include <stdio.h> #endif #if ( defined GRAVITY && defined GPU && POT_SCHEME == MG ) #define POT_NXT_F ( PATCH_SIZE+2*POT_GHOST_SIZE ) #define POT_NTHREAD ( POT_BLOCK_SIZE_X ) #define POT_USELESS ( POT_GHOST_SIZE%2 ) #if ( POT_NXT_F == 18 ) // for POT_NXT_F == 18, we reuse the same shared memory array due to the lack of shared memory # define REUSE_SHARED # define MAX_NLV 4U # define NBOTTOM_SMOOTH 1U # define NGRID_LV0 18U # define NGRID_LV1 9U # define NGRID_LV2 5U # define NGRID_LV3 3U #elif ( POT_NXT_F == 16 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 7U # define NGRID_LV0 16U # define NGRID_LV1 8U # define NGRID_LV2 4U #elif ( POT_NXT_F == 14 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 7U # define NGRID_LV0 14U # define NGRID_LV1 7U # define NGRID_LV2 4U #elif ( POT_NXT_F == 12 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 1U # define NGRID_LV0 12U # define NGRID_LV1 6U # define NGRID_LV2 3U #elif ( POT_NXT_F == 10 ) # define MAX_NLV 3U # define NBOTTOM_SMOOTH 1U # define NGRID_LV0 10U # define NGRID_LV1 5U # define NGRID_LV2 3U #else #error ERROR : not supported POT_NXT_F #endif #if ( MAX_NLV != 3 && MAX_NLV != 4 ) #error ERROR : MAX_NLV != 3 or 4 #endif // variables reside in constant memory #include "CUPOT_PoissonSolver_SetConstMem.cu" // prototype static __device__ void LoadRho( const real *g_Rho, real *s_Rho, const real Poi_Coeff, const uint g_Idx0 ); static __device__ void Smoothing( real *Sol, const real *RHS, const real dh, const uint NGrid, const uint Idx0 ); static __device__ void ComputeDefect( const real *Sol, const real *RHS, real *Def, const real dh, const uint NGrid, const uint Idx0 ); static __device__ void Restrict( const real *FData, real *CData, const uint NGrid_F, const uint NGrid_C, const uint Idx0 ); static __device__ void Prolongate_and_Correct( const real *CData, real *FData, const uint NGrid_C, const uint NGrid_F, const uint FIdx0 ); static __device__ void EstimateError( const real *Sol, const real *RHS, const real dh, real *s_Error, real *s_SolSum, const uint tid ); //------------------------------------------------------------------------------------------------------- // Function : CUPOT_PoissonSolver_MG // Description : GPU Poisson solver using the multigrid scheme // // Note : a. Work for POT_GHOST_SIZE = 1, 2, 3, 4, 5 <--> POT_NXT_F = 10, 12, 14, 16, 18 // b. Prefix "g" for pointers pointing to the "Global" memory space // Prefix "s" for pointers pointing to the "Shared" memory space // c. Reference : Numerical Recipes, Chapter 20.6 // // Parameter : g_Rho_Array : Global memory array storing the input density // g_Pot_Array_In : Global memory array storing the input "coarse-grid" potential for ] // interpolation // g_Pot_Array_Out : Global memory array to store the output potential // dh_Min : Grid size of the input data // Max_Iter : Maximum number of iterations for multigrid // NPre_Smooth : Number of pre-smoothing steps for multigrid // NPost_Smooth : Number of post-smoothing steps for multigrid // Tolerated_Error : Maximum tolerated error for multigrid // Poi_Coeff : Coefficient in front of the RHS in the Poisson eq. 
// IntScheme : Interpolation scheme for potential // --> currently supported schemes include // INT_CQUAD : conservative quadratic interpolation // INT_QUAD : quadratic interpolation //--------------------------------------------------------------------------------------------------- __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ) { const uint bid = blockIdx.x; const uint tid = threadIdx.x; const uint dy = POT_NXT_F; const uint dz = POT_NXT_F*POT_NXT_F; int Iter; uint t, s_Idx; real dh[MAX_NLV]; // set the grid sizes at all different levels # if ( MAX_NLV == 4 ) const uint NGrid[MAX_NLV] = { NGRID_LV0, NGRID_LV1, NGRID_LV2, NGRID_LV3 }; # elif ( MAX_NLV == 3 ) const uint NGrid[MAX_NLV] = { NGRID_LV0, NGRID_LV1, NGRID_LV2 }; # endif dh[0] = dh_Min; for (uint Lv=1U; Lv<MAX_NLV; Lv++) dh[Lv] = dh_Min * ( NGrid[0] - 1U ) / ( NGrid[Lv] - 1U ); // allocate shared memory __shared__ real s_Sol_Lv0[ NGRID_LV0*NGRID_LV0*NGRID_LV0 ]; # ifndef FLOAT8 __shared__ real s_RHS_Lv0[ NGRID_LV0*NGRID_LV0*NGRID_LV0 ]; __shared__ real s_SolSum[POT_NTHREAD]; __shared__ real s_Error [POT_NTHREAD]; # else // shared memory is too small for double precision --> use global memory instead __shared__ real *s_RHS_Lv0; __shared__ real *s_SolSum; __shared__ real *s_Error; if ( tid == 0 ) { s_RHS_Lv0 = (real*)malloc( sizeof(real*)*NGRID_LV0*NGRID_LV0*NGRID_LV0 ); s_SolSum = (real*)malloc( sizeof(real*)*POT_NTHREAD ); s_Error = (real*)malloc( sizeof(real*)*POT_NTHREAD ); } __syncthreads(); # ifdef GAMER_DEBUG if ( tid == 0 ) { if ( s_RHS_Lv0 == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_RHS_Lv0", bid, __FUNCTION__ ); return; } if ( s_SolSum == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_SolSum", bid, __FUNCTION__ ); return; } if ( s_Error == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_Error", bid, __FUNCTION__ ); return; } } # endif # endif // #ifndef FLOAT8 ... else ... real *s_Def_Lv0 = s_RHS_Lv0; // s_Def_Lv0, s_CPot and RHS_Lv0 share the same shared-memory array real *s_CPot = s_RHS_Lv0; # ifdef REUSE_SHARED // reuse the shared-memory arrays due to the lack of shared memory real *s_Sol_Lv1 = s_RHS_Lv0; real *s_Sol_Lv2 = s_Sol_Lv1 + NGRID_LV1*NGRID_LV1*NGRID_LV1; real *s_Sol_Lv3 = s_Sol_Lv2 + NGRID_LV2*NGRID_LV2*NGRID_LV2; real *s_RHS_Lv2 = s_Sol_Lv3 + NGRID_LV3*NGRID_LV3*NGRID_LV3; real *s_RHS_Lv3 = s_RHS_Lv2 + NGRID_LV2*NGRID_LV2*NGRID_LV2; real *s_Def_Lv1 = s_RHS_Lv3 + NGRID_LV3*NGRID_LV3*NGRID_LV3; real *s_Def_Lv2 = s_Def_Lv1 + NGRID_LV1*NGRID_LV1*NGRID_LV1; real *s_Def_Lv3 = s_Def_Lv2 + NGRID_LV2*NGRID_LV2*NGRID_LV2; // use global memory for s_RHS_Lv1 because s_RHS_Lv1 and s_Def_Lv0 cannot share the same memory space __shared__ real *s_RHS_Lv1; if ( tid == 0 ) s_RHS_Lv1 = (real*) malloc( sizeof(real)*NGRID_LV1*NGRID_LV1*NGRID_LV1 ); __syncthreads(); # ifdef GAMER_DEBUG if ( tid == 0 && s_RHS_Lv1 == NULL ) { printf( "ERROR : dynamic global memory allocation for \"%s\" failed at block %d in \"%s\" !!\n", "s_RHS_Lv1", bid, __FUNCTION__ ); return; } # endif # else // #ifdef REUSE_SHARED ... else ... 
__shared__ real s_Sol_Lv1[ NGRID_LV1*NGRID_LV1*NGRID_LV1 ]; __shared__ real s_Sol_Lv2[ NGRID_LV2*NGRID_LV2*NGRID_LV2 ]; __shared__ real s_RHS_Lv1[ NGRID_LV1*NGRID_LV1*NGRID_LV1 ]; __shared__ real s_RHS_Lv2[ NGRID_LV2*NGRID_LV2*NGRID_LV2 ]; __shared__ real s_Def_Lv1[ NGRID_LV1*NGRID_LV1*NGRID_LV1 ]; __shared__ real s_Def_Lv2[ NGRID_LV2*NGRID_LV2*NGRID_LV2 ]; # if ( MAX_NLV == 4 ) __shared__ real s_Sol_Lv3[ NGRID_LV3*NGRID_LV3*NGRID_LV3 ]; __shared__ real s_RHS_Lv3[ NGRID_LV3*NGRID_LV3*NGRID_LV3 ]; __shared__ real s_Def_Lv3[ NGRID_LV3*NGRID_LV3*NGRID_LV3 ]; # endif # endif // #ifdef REUSE_SHARED ... else ... # if ( MAX_NLV == 4 ) real *s_Sol[MAX_NLV] = { s_Sol_Lv0, s_Sol_Lv1, s_Sol_Lv2, s_Sol_Lv3 }; real *s_RHS[MAX_NLV] = { s_RHS_Lv0, s_RHS_Lv1, s_RHS_Lv2, s_RHS_Lv3 }; real *s_Def[MAX_NLV] = { s_Def_Lv0, s_Def_Lv1, s_Def_Lv2, s_Def_Lv3 }; # elif ( MAX_NLV == 3 ) real *s_Sol[MAX_NLV] = { s_Sol_Lv0, s_Sol_Lv1, s_Sol_Lv2 }; real *s_RHS[MAX_NLV] = { s_RHS_Lv0, s_RHS_Lv1, s_RHS_Lv2 }; real *s_Def[MAX_NLV] = { s_Def_Lv0, s_Def_Lv1, s_Def_Lv2 }; # endif s_Error[0] = __FLT_MAX__; // a. load the coarse-grid potential into shared memory // ----------------------------------------------------------------------------------------------------------- t = tid; while ( t < POT_NXT*POT_NXT*POT_NXT ) { s_CPot[t] = g_Pot_Array_In[bid][t]; t += POT_NTHREAD; } __syncthreads(); // b. evaluate the "fine-grid" potential by interpolation (as the initial guess and the B.C.) // ----------------------------------------------------------------------------------------------------------- const int N_CSlice = POT_NTHREAD / ( (POT_NXT-2)*(POT_NXT-2) ); if ( tid < N_CSlice*(POT_NXT-2)*(POT_NXT-2) ) { const real Const_8 = 1.0/8.0; const real Const_64 = 1.0/64.0; const real Const_512 = 1.0/512.0; const int Cdx = 1; const int Cdy = POT_NXT; const int Cdz = POT_NXT*POT_NXT; const int CIDx = 1 + tid % ( POT_NXT-2 ); const int CIDy = 1 + ( tid % ( (POT_NXT-2)*(POT_NXT-2) ) ) / ( POT_NXT-2 ); const int CIDz = 1 + tid / ( (POT_NXT-2)*(POT_NXT-2) ); int CID = __mul24( CIDz, Cdz ) + __mul24( CIDy, Cdy ) + __mul24( CIDx, Cdx ); const int Fdx = 1; const int Fdy = POT_NXT_F; const int Fdz = POT_NXT_F*POT_NXT_F; const int FIDx = ( (CIDx-1)<<1 ) - POT_USELESS; const int FIDy = ( (CIDy-1)<<1 ) - POT_USELESS; int FIDz = ( (CIDz-1)<<1 ) - POT_USELESS; int FID = __mul24( FIDz, Fdz ) + __mul24( FIDy, Fdy ) + __mul24( FIDx, Fdx ); real TempFPot1, TempFPot2, TempFPot3, TempFPot4, TempFPot5, TempFPot6, TempFPot7, TempFPot8; real Slope_00, Slope_01, Slope_02, Slope_03, Slope_04, Slope_05, Slope_06, Slope_07; real Slope_08, Slope_09, Slope_10, Slope_11, Slope_12; int Idx, Idy, Idz, ii, jj, kk; for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice) { switch ( IntScheme ) { /* case INT_CENTRAL : { Slope_00 = (real)0.125 * ( s_CPot[CID+Cdx] - s_CPot[CID-Cdx] ); Slope_01 = (real)0.125 * ( s_CPot[CID+Cdy] - s_CPot[CID-Cdy] ); Slope_02 = (real)0.125 * ( s_CPot[CID+Cdz] - s_CPot[CID-Cdz] ); TempFPot1 = s_CPot[CID] - Slope_00 - Slope_01 - Slope_02; TempFPot2 = s_CPot[CID] + Slope_00 - Slope_01 - Slope_02; TempFPot3 = s_CPot[CID] - Slope_00 + Slope_01 - Slope_02; TempFPot4 = s_CPot[CID] + Slope_00 + Slope_01 - Slope_02; TempFPot5 = s_CPot[CID] - Slope_00 - Slope_01 + Slope_02; TempFPot6 = s_CPot[CID] + Slope_00 - Slope_01 + Slope_02; TempFPot7 = s_CPot[CID] - Slope_00 + Slope_01 + Slope_02; TempFPot8 = s_CPot[CID] + Slope_00 + Slope_01 + Slope_02; } break; // INT_CENTRAL */ case INT_CQUAD : { Slope_00 = Const_8 * ( s_CPot[CID+Cdx ] - s_CPot[CID-Cdx ] ); Slope_01 = 
Const_8 * ( s_CPot[CID +Cdy ] - s_CPot[CID -Cdy ] ); Slope_02 = Const_8 * ( s_CPot[CID +Cdz] - s_CPot[CID -Cdz] ); Slope_03 = Const_64 * ( s_CPot[CID+Cdx -Cdz] - s_CPot[CID-Cdx -Cdz] ); Slope_04 = Const_64 * ( s_CPot[CID +Cdy-Cdz] - s_CPot[CID -Cdy-Cdz] ); Slope_05 = Const_64 * ( s_CPot[CID+Cdx-Cdy ] - s_CPot[CID-Cdx-Cdy ] ); Slope_06 = Const_64 * ( s_CPot[CID+Cdx+Cdy ] - s_CPot[CID-Cdx+Cdy ] ); Slope_07 = Const_64 * ( s_CPot[CID+Cdx +Cdz] - s_CPot[CID-Cdx +Cdz] ); Slope_08 = Const_64 * ( s_CPot[CID +Cdy+Cdz] - s_CPot[CID -Cdy+Cdz] ); Slope_09 = Const_512 * ( s_CPot[CID+Cdx-Cdy-Cdz] - s_CPot[CID-Cdx-Cdy-Cdz] ); Slope_10 = Const_512 * ( s_CPot[CID+Cdx+Cdy-Cdz] - s_CPot[CID-Cdx+Cdy-Cdz] ); Slope_11 = Const_512 * ( s_CPot[CID+Cdx-Cdy+Cdz] - s_CPot[CID-Cdx-Cdy+Cdz] ); Slope_12 = Const_512 * ( s_CPot[CID+Cdx+Cdy+Cdz] - s_CPot[CID-Cdx+Cdy+Cdz] ); TempFPot1 = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06 + Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot2 = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06 - Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; TempFPot3 = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06 + Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; TempFPot4 = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06 - Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot5 = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06 - Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; TempFPot6 = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06 + Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot7 = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06 - Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID]; TempFPot8 = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06 + Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID]; } break; // INT_CQUAD case INT_QUAD : { TempFPot1 = TempFPot2 = TempFPot3 = TempFPot4 = (real)0.0; TempFPot5 = TempFPot6 = TempFPot7 = TempFPot8 = (real)0.0; for (int dk=-1; dk<=1; dk++) { Idz = dk+1; kk = __mul24( dk, Cdz ); for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy ); for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx ); TempFPot1 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mm[Idx]; TempFPot2 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mp[Idx]; TempFPot3 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mm[Idx]; TempFPot4 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mp[Idx]; TempFPot5 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mm[Idx]; TempFPot6 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mp[Idx]; TempFPot7 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mm[Idx]; TempFPot8 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mp[Idx]; }}} } break; // INT_QUAD } // switch ( IntScheme ) // save data to the shared-memory array. // Currently this part is highly diverge. 
However, since the interpolation takes much less time than the // Poisson solver does, we have not yet tried to optimize this part if ( FIDz >= 0 ) { if ( FIDx >= 0 && FIDy >= 0 ) s_Sol_Lv0[FID ] = TempFPot1; if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_Sol_Lv0[FID+Fdx ] = TempFPot2; if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID +Fdy ] = TempFPot3; if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID+Fdx+Fdy ] = TempFPot4; } if ( FIDz <= POT_NXT_F-2 ) { if ( FIDx >= 0 && FIDy >= 0 ) s_Sol_Lv0[FID +Fdz] = TempFPot5; if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_Sol_Lv0[FID+Fdx +Fdz] = TempFPot6; if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID +Fdy+Fdz] = TempFPot7; if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_Sol_Lv0[FID+Fdx+Fdy+Fdz] = TempFPot8; } CID += __mul24( N_CSlice, Cdz ); FID += __mul24( 2*N_CSlice, Fdz ); FIDz += 2*N_CSlice; } // for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice) } // if ( tid < N_CSlice*(POT_NXT-2)*(POT_NXT-2) ) __syncthreads(); // c1. initialize s_Def_Lv{0-3} as zero (just to make sure that the boundary cells of s_Def_Lv{0-3} are zero) // (note that s_Def_Lv0 and s_CPot share the same array) // ----------------------------------------------------------------------------------------------------------- # ifndef REUSE_SHARED t = tid; while ( t < NGRID_LV0*NGRID_LV0*NGRID_LV0 ) { s_Def_Lv0[t] = (real)0.0; t += POT_NTHREAD; } t = tid; while ( t < NGRID_LV1*NGRID_LV1*NGRID_LV1 ) { s_Def_Lv1[t] = (real)0.0; t += POT_NTHREAD; } t = tid; while ( t < NGRID_LV2*NGRID_LV2*NGRID_LV2 ) { s_Def_Lv2[t] = (real)0.0; t += POT_NTHREAD; } # if ( MAX_NLV == 4 ) t = tid; while ( t < NGRID_LV3*NGRID_LV3*NGRID_LV3 ) { s_Def_Lv3[t] = (real)0.0; t += POT_NTHREAD; } # endif __syncthreads(); # endif // c2 load density into shared memory LoadRho( g_Rho_Array[bid], s_RHS_Lv0, Poi_Coeff, tid ); // d. 
use the MG scheme to evaluate potential // ----------------------------------------------------------------------------------------------------------- Iter = 0; while ( Iter < Max_Iter && s_Error[0] > Tolerated_Error ) { // V-cycle : finer --> coarser grids for (int Lv=0; Lv<MAX_NLV-1; Lv++) { // pre-smoothing (apply relaxation to compute solution/correction) for (int PreStep=0; PreStep<NPre_Smooth; PreStep++) Smoothing( s_Sol[Lv], s_RHS[Lv], dh[Lv], NGrid[Lv], tid ); // compute defect ComputeDefect( s_Sol[Lv], s_RHS[Lv], s_Def[Lv], dh[Lv], NGrid[Lv], tid ); // restrict defect (use as the RHS at the next level) Restrict( s_Def[Lv], s_RHS[Lv+1], NGrid[Lv], NGrid[Lv+1], tid ); // initialize the correction at the next level to zero t = tid; while ( t < NGrid[Lv+1]*NGrid[Lv+1]*NGrid[Lv+1] ) { s_Sol[Lv+1][t] = (real)0.0; t += POT_NTHREAD; } __syncthreads(); } // calculate the correction at the bottom level for (int BottomStep=0; BottomStep<NBOTTOM_SMOOTH; BottomStep++) Smoothing( s_Sol[MAX_NLV-1], s_RHS[MAX_NLV-1], dh[MAX_NLV-1], NGrid[MAX_NLV-1], tid ); // V-cycle : coarser --> finer grids for (int Lv=MAX_NLV-2; Lv>=0; Lv--) { // prolongate correction (from Lv+1 to Lv) and correct solution/correction at Lv Prolongate_and_Correct( s_Sol[Lv+1], s_Sol[Lv], NGrid[Lv+1], NGrid[Lv], tid ); // load s_RHS_Lv0 from global memory since s_RHS_Lv0 and s_Def_Lv0 share the same shared-memory array if ( Lv == 0 ) LoadRho( g_Rho_Array[bid], s_RHS_Lv0, Poi_Coeff, tid ); // post-smoothing (apply relaxation to compute solution/correction again) for (int PostStep=0; PostStep<NPost_Smooth; PostStep++) Smoothing( s_Sol[Lv], s_RHS[Lv], dh[Lv], NGrid[Lv], tid ); } // estimate error EstimateError( s_Sol[0], s_RHS[0], dh[0], s_Error, s_SolSum, tid ); Iter ++; // if ( tid == 0 && bid == 0 ) printf( "Patch %3d, Iter %3d: Error = %13.7e\n", bid, Iter, s_Error[0] ); } // while ( Iter < Max_Iter && Error > Tolerated_Error ) // e. store potential back to global memory // ----------------------------------------------------------------------------------------------------------- t = tid; while ( t < GRA_NXT*GRA_NXT*GRA_NXT ) { s_Idx = __umul24( t/(GRA_NXT*GRA_NXT) + POT_GHOST_SIZE - GRA_GHOST_SIZE, dz ) + __umul24( t%(GRA_NXT*GRA_NXT)/GRA_NXT + POT_GHOST_SIZE - GRA_GHOST_SIZE, dy ) + t%(GRA_NXT ) + POT_GHOST_SIZE - GRA_GHOST_SIZE; g_Pot_Array_Out[bid][t] = s_Sol_Lv0[s_Idx]; t += POT_NTHREAD; } // free memory # ifdef REUSE_SHARED if ( tid == 0 ) free( s_RHS_Lv1 ); # endif # ifdef FLOAT8 if ( tid == 0 ) { free( s_RHS_Lv0 ); free( s_SolSum ); free( s_Error ); } # endif } // FUNCTION : CUPOT_PoissonSolver_MG //------------------------------------------------------------------------------------------------------- // Function : Smoothing // Description : Use Gauss-Seidel method for smoothing // // Note : 1. B.C. should be stored in the input array "Sol" // 2. 
"__syncthreads" is invoked in the end of this function // // Parameter : Sol : 1D array to store the output solution to the Poisson equation // RHS : 1D array storing the RHS of the Poisson equation // dh : Grid size // NGrid : Number of cells in each spatial direction for Sol and RHS // Idx0 : Starting cell index //------------------------------------------------------------------------------------------------------- __device__ void Smoothing( real *Sol, const real *RHS, const real dh, const uint NGrid, const uint Idx0 ) { const real dh2 = dh*dh; const real One_Six = (real)1.0/(real)6.0; const uint NGrid_m2 = NGrid - 2U; const uint NGrid2 = __umul24( NGrid, NGrid ); const uint NGrid_2 = (NGrid_m2+1)>>1U; // if NGrid is an odd number, one cell is padded const uint NGrid2_2 = __umul24( NGrid_m2, NGrid_2 ); const uint NInner_2 = __umul24( NGrid_m2, NGrid2_2 ); const uint di = 1U; const uint dj = NGrid; const uint dk = NGrid2; uint i, j, k, ip, jp, kp, im, jm, km, ijk, pass_flip; uint Idx; // odd-even ordering for (uint pass=0; pass<2U; pass++) { Idx = Idx0; pass_flip = pass & 1U; while ( Idx < NInner_2 ) { i = Idx%NGrid_2<<1U; j = Idx%NGrid2_2/NGrid_2; k = Idx/NGrid2_2; i += 1U + ( (j&1U)^(k&1U)^pass_flip ); j++; k++; ijk = __umul24( k, NGrid2 ) + __umul24( j, NGrid ) + i; ip = ijk + di; jp = ijk + dj; kp = ijk + dk; im = ijk - di; jm = ijk - dj; km = ijk - dk; // update solution //###OPTIMIZATION: try to optimize out this conditional operation (for the case that NGrid is odd) # if ( ( POT_NXT_F & (POT_NXT_F-1) ) != 0 ) if ( i <= NGrid_m2 ) # endif Sol[ijk] = One_Six*( Sol[kp] + Sol[km] + Sol[jp] + Sol[jm] + Sol[ip] + Sol[im] - dh2*RHS[ijk] ); Idx += POT_NTHREAD; } // while ( Idx < NInner_2 ) __syncthreads(); } // for (int pass=0; pass<2; pass++) } // FUNCTION : Smoothing //------------------------------------------------------------------------------------------------------- // Function : ComputeDefect // Description : Compute negative defect defined as "-(Laplacian(Sol)-RHS)" // // Note : 1. B.C. should be stored in the input array "Sol" // 2. It is assumed that the boundary values of Def have already been initialized as zero // (unless REUSE_SHARED is defined) // 3. 
"__syncthreads" is invoked in the end of this function // // Parameter : Sol : 1D array storing the input solution to the Poisson equation // RHS : 1D array storing the RHS of the Poisson equation // Def : 1D array to store the output defect // dh : Grid size // NGrid : Number of cells in each spatial direction for Sol and RHS // Idx0 : Starting cell index //------------------------------------------------------------------------------------------------------- __device__ void ComputeDefect( const real *Sol, const real *RHS, real *Def, const real dh, const uint NGrid, const uint Idx0 ) { const real _dh2 = (real)-1.0/(dh*dh); const uint NGrid2 = __umul24( NGrid, NGrid ); const uint NGrid_m2 = NGrid - 2U; const uint NGrid_m2_2 = __umul24( NGrid_m2, NGrid_m2 ); const uint NGrid_m2_3 = __umul24( NGrid_m2, NGrid_m2_2 ); const uint di = 1U; const uint dj = NGrid; const uint dk = NGrid2; uint i, j, k, ip, jp, kp, im, jm, km, ijk; uint Idx = Idx0; while ( Idx < NGrid_m2_3 ) { i = 1U + Idx%NGrid_m2; j = 1U + Idx%NGrid_m2_2/NGrid_m2; k = 1U + Idx/NGrid_m2_2; ijk = __umul24( k, NGrid2 ) + __umul24( j, NGrid ) + i; ip = ijk + di; jp = ijk + dj; kp = ijk + dk; im = ijk - di; jm = ijk - dj; km = ijk - dk; Def[ijk] = _dh2*( Sol[kp] + Sol[km] + Sol[jp] + Sol[jm] + Sol[ip] + Sol[im] - (real)6.0*Sol[ijk] ) + RHS[ijk]; Idx += POT_NTHREAD; } // while ( Idx < NGrid_m2_3 ) // set the boundary values as zeros # ifdef REUSE_SHARED Idx = Idx0; while ( Idx < NGrid2 ) { i = Idx%NGrid; j = Idx/NGrid; Def[ __umul24( 0U, NGrid2 ) + __umul24( j, NGrid ) + i ] = (real)0.0; Def[ __umul24( NGrid-1U, NGrid2 ) + __umul24( j, NGrid ) + i ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( i, NGrid ) + 0U ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( i, NGrid ) + NGrid-1U ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( 0U, NGrid ) + i ] = (real)0.0; Def[ __umul24( j, NGrid2 ) + __umul24( NGrid-1U, NGrid ) + i ] = (real)0.0; Idx += POT_NTHREAD; } # endif __syncthreads(); } // FUNCTION : ComputeDefect //------------------------------------------------------------------------------------------------------- // Function : LoadRho // Description : Load density field from the global memory to the shared memory // // Note : 1. "__syncthreads" is invoked in the end of this function // 2. Loaded data will be multiplied by "Poi_Coeff" // 3. The size of the shared-memory array "s_Rho" is "RHO_NXT+2" (padded with one zero on each // side in each direction) // // Parameter : g_Rho : Global memory array storing the input density // s_Rho : Shared memory array to store the density // Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a) // g_Idx0 : Starting read index from the global memory //------------------------------------------------------------------------------------------------------- __device__ void LoadRho( const real *g_Rho, real *s_Rho, const real Poi_Coeff, const uint g_Idx0 ) { uint s_Idx, g_Idx = g_Idx0; uint3 g_Idx3D; while ( g_Idx < RHO_NXT*RHO_NXT*RHO_NXT ) { g_Idx3D.x = g_Idx%RHO_NXT; g_Idx3D.y = g_Idx%(RHO_NXT*RHO_NXT)/RHO_NXT; g_Idx3D.z = g_Idx/(RHO_NXT*RHO_NXT); s_Idx = __umul24( __umul24( g_Idx3D.z+1U, NGRID_LV0 ) + g_Idx3D.y+1U, NGRID_LV0 ) + g_Idx3D.x+1U; s_Rho[s_Idx] = Poi_Coeff*g_Rho[g_Idx]; g_Idx += POT_NTHREAD; } __syncthreads(); } // LoadRho //------------------------------------------------------------------------------------------------------- // Function : Restrict // Description : Restrict the input fine-grid data to get the coarse-grid data // // Note : 1. 
We assume that the input arrays follow the "finite-difference" fashion, in which the data // are defined in the cell intersections instead of cell averages // --> N^3 cells define a 3D grid with the size equal to (N-1)^3 // 2. Fine-grid and coarse-grid data at boundaries are assumed to be zero (because defect at // boundaries are always zero) // 3. "__syncthreads" is invoked in the end of this function // // Parameter : FData : 1D array storing the input fine-grid data // CData : 1D array to store the output coarse-grid data // NGrid_F : Number of fine-grid cells in each spatial direction // NGrid_C : Number of coarse-grid cells in each spatial direction // CIdx0 : Starting coarse-grid index //------------------------------------------------------------------------------------------------------- __device__ void Restrict( const real *FData, real *CData, const uint NGrid_F, const uint NGrid_C, const uint CIdx0 ) { const real Ratio = real(NGrid_F-1) / real(NGrid_C-1); const uint NGrid_F2 = __umul24( NGrid_F, NGrid_F ); const uint NGrid_C2 = __umul24( NGrid_C, NGrid_C ); const uint NGrid_Cm2 = NGrid_C - 2U; const uint NGrid_Cm2_2 = __umul24( NGrid_Cm2, NGrid_Cm2 ); const uint NGrid_Cm2_3 = __umul24( NGrid_Cm2, NGrid_Cm2_2 ); const uint Fdi = 1U; const uint Fdj = NGrid_F; const uint Fdk = NGrid_F2; uint Ci, Cj, Ck, Fi, Fj, Fk, Cijk, Fijk; real x, y, z, Coeff_xm, Coeff_xc, Coeff_xp, Coeff_ym, Coeff_yc, Coeff_yp, Coeff_zm, Coeff_zc, Coeff_zp; uint CIdx = CIdx0; while ( CIdx < NGrid_Cm2_3 ) { Ci = 1U + CIdx%NGrid_Cm2; Cj = 1U + CIdx%NGrid_Cm2_2/NGrid_Cm2; Ck = 1U + CIdx/NGrid_Cm2_2; x = Ci*Ratio; y = Cj*Ratio; z = Ck*Ratio; Fi = uint( x + (real)0.5 ); Fj = uint( y + (real)0.5 ); Fk = uint( z + (real)0.5 ); Cijk = __umul24( Ck, NGrid_C2 ) + __umul24( Cj, NGrid_C ) + Ci; Fijk = __umul24( Fk, NGrid_F2 ) + __umul24( Fj, NGrid_F ) + Fi; Coeff_xm = (real)0.5 * ( Fi + (real)0.5 - x ); Coeff_ym = (real)0.5 * ( Fj + (real)0.5 - y ); Coeff_zm = (real)0.5 * ( Fk + (real)0.5 - z ); Coeff_xp = (real)0.5 - Coeff_xm; Coeff_yp = (real)0.5 - Coeff_ym; Coeff_zp = (real)0.5 - Coeff_zm; Coeff_xc = (real)0.5; Coeff_yc = (real)0.5; Coeff_zc = (real)0.5; // Coeff_xc = (real)1.0 - Coeff_xm - Coeff_xp; // Coeff_yc = (real)1.0 - Coeff_ym - Coeff_yp; // Coeff_zc = (real)1.0 - Coeff_zm - Coeff_zp; //###OPTIMIZATION : follow the same strategy adopted in "Int_Quadratic" CData[Cijk] = Coeff_zm * Coeff_ym * Coeff_xm * FData[ Fijk - Fdk - Fdj - Fdi ] + Coeff_zm * Coeff_ym * Coeff_xc * FData[ Fijk - Fdk - Fdj ] + Coeff_zm * Coeff_ym * Coeff_xp * FData[ Fijk - Fdk - Fdj + Fdi ] + Coeff_zm * Coeff_yc * Coeff_xm * FData[ Fijk - Fdk - Fdi ] + Coeff_zm * Coeff_yc * Coeff_xc * FData[ Fijk - Fdk ] + Coeff_zm * Coeff_yc * Coeff_xp * FData[ Fijk - Fdk + Fdi ] + Coeff_zm * Coeff_yp * Coeff_xm * FData[ Fijk - Fdk + Fdj - Fdi ] + Coeff_zm * Coeff_yp * Coeff_xc * FData[ Fijk - Fdk + Fdj ] + Coeff_zm * Coeff_yp * Coeff_xp * FData[ Fijk - Fdk + Fdj + Fdi ] + Coeff_zc * Coeff_ym * Coeff_xm * FData[ Fijk - Fdj - Fdi ] + Coeff_zc * Coeff_ym * Coeff_xc * FData[ Fijk - Fdj ] + Coeff_zc * Coeff_ym * Coeff_xp * FData[ Fijk - Fdj + Fdi ] + Coeff_zc * Coeff_yc * Coeff_xm * FData[ Fijk - Fdi ] + Coeff_zc * Coeff_yc * Coeff_xc * FData[ Fijk ] + Coeff_zc * Coeff_yc * Coeff_xp * FData[ Fijk + Fdi ] + Coeff_zc * Coeff_yp * Coeff_xm * FData[ Fijk + Fdj - Fdi ] + Coeff_zc * Coeff_yp * Coeff_xc * FData[ Fijk + Fdj ] + Coeff_zc * Coeff_yp * Coeff_xp * FData[ Fijk + Fdj + Fdi ] + Coeff_zp * Coeff_ym * Coeff_xm * FData[ Fijk + Fdk - Fdj - Fdi ] + Coeff_zp * 
Coeff_ym * Coeff_xc * FData[ Fijk + Fdk - Fdj ] + Coeff_zp * Coeff_ym * Coeff_xp * FData[ Fijk + Fdk - Fdj + Fdi ] + Coeff_zp * Coeff_yc * Coeff_xm * FData[ Fijk + Fdk - Fdi ] + Coeff_zp * Coeff_yc * Coeff_xc * FData[ Fijk + Fdk ] + Coeff_zp * Coeff_yc * Coeff_xp * FData[ Fijk + Fdk + Fdi ] + Coeff_zp * Coeff_yp * Coeff_xm * FData[ Fijk + Fdk + Fdj - Fdi ] + Coeff_zp * Coeff_yp * Coeff_xc * FData[ Fijk + Fdk + Fdj ] + Coeff_zp * Coeff_yp * Coeff_xp * FData[ Fijk + Fdk + Fdj + Fdi ]; // coefficient adopted in Enzo which seems to give faster convergence rate // CData[Cijk] *= (real)0.52*Ratio; CIdx += POT_NTHREAD; } // while ( CIdx < NGrid_Cm2_3 ) __syncthreads(); } // FUNCTION : Restrict //------------------------------------------------------------------------------------------------------- // Function : Prolongate_and_Correct // Description : Prolongate the input coarse-grid correction to correct the fine-grid solution/correction // // Note : 1. We assume that the input arrays follow the "finite-difference" fashion, in which the data // are defined in the cell intersections instead of cell averages // --> N^3 cells define a 3D grid with the size equal to (N-1)^3 // 2. Boundary data of FData_1D are not corrected (since solution/correction at boundaries // should be fixed // // Parameter : CData : 1D array storing the input coarse-grid data // FData : 1D array to store the output fine-grid data // NGrid_C : Number of coarse-grid cells in each spatial direction // NGrid_F : Number of fine-grid cells in each spatial direction // FIdx0 : Starting fine-grid index //------------------------------------------------------------------------------------------------------- __device__ void Prolongate_and_Correct( const real *CData, real *FData, const uint NGrid_C, const uint NGrid_F, const uint FIdx0 ) { const real Ratio = real(NGrid_C-1) / real(NGrid_F-1); const uint NGrid_C2 = __umul24( NGrid_C, NGrid_C ); const uint NGrid_F2 = __umul24( NGrid_F, NGrid_F ); const uint NGrid_Fm2 = NGrid_F - 2U; const uint NGrid_Fm2_2 = __umul24( NGrid_Fm2, NGrid_Fm2 ); const uint NGrid_Fm2_3 = __umul24( NGrid_Fm2, NGrid_Fm2_2 ); const uint Cdi = 1U; const uint Cdj = NGrid_C; const uint Cdk = NGrid_C2; uint Ci, Cj, Ck, Fi, Fj, Fk, Cijk, Fijk; real x, y, z, Coeff_xm, Coeff_xp, Coeff_ym, Coeff_yp, Coeff_zm, Coeff_zp; uint FIdx = FIdx0; while ( FIdx < NGrid_Fm2_3 ) { Fi = 1U + FIdx%NGrid_Fm2; Fj = 1U + FIdx%NGrid_Fm2_2/NGrid_Fm2; Fk = 1U + FIdx/NGrid_Fm2_2; x = Fi*Ratio; y = Fj*Ratio; z = Fk*Ratio; Ci = uint( x ); Cj = uint( y ); Ck = uint( z ); Cijk = __umul24( Ck, NGrid_C2 ) + __umul24( Cj, NGrid_C ) + Ci; Fijk = __umul24( Fk, NGrid_F2 ) + __umul24( Fj, NGrid_F ) + Fi; Coeff_xm = Ci + 1U - x; Coeff_ym = Cj + 1U - y; Coeff_zm = Ck + 1U - z; Coeff_xp = (real)1.0 - Coeff_xm; Coeff_yp = (real)1.0 - Coeff_ym; Coeff_zp = (real)1.0 - Coeff_zm; FData[Fijk] += Coeff_zm * Coeff_ym * Coeff_xm * CData[ Cijk ] + Coeff_zm * Coeff_ym * Coeff_xp * CData[ Cijk + Cdi ] + Coeff_zm * Coeff_yp * Coeff_xm * CData[ Cijk + Cdj ] + Coeff_zp * Coeff_ym * Coeff_xm * CData[ Cijk + Cdk ] + Coeff_zm * Coeff_yp * Coeff_xp * CData[ Cijk + Cdj + Cdi ] + Coeff_zp * Coeff_yp * Coeff_xm * CData[ Cijk + Cdk + Cdj ] + Coeff_zp * Coeff_ym * Coeff_xp * CData[ Cijk + Cdk + Cdi ] + Coeff_zp * Coeff_yp * Coeff_xp * CData[ Cijk + Cdk + Cdj + Cdi ]; FIdx += POT_NTHREAD; } // while ( FIdx < NGrid_Fm2_3 ) __syncthreads(); } // FUNCTION : Prolongate_and_Correct //------------------------------------------------------------------------------------------------------- 
// Function : EstimateError // Description : Estimate the L1 error // // Note : 1. "__syncthreads" is invoked in the end of this function // 2. Shared-memory arrays "s_Error" and "s_SolSum" are used for GPU reduction // // Parameter : Sol : 1D array storing the input solution to the Poisson equation // RHS : 1D array storing the RHS of the Poisson equation // dh : Grid size // s_Error : Shared-memory array to store the L1 error // s_SolSum : Shared-memroy array to store the sum of solution // tid : Thread index //------------------------------------------------------------------------------------------------------- __device__ void EstimateError( const real *Sol, const real *RHS, const real dh, real *s_Error, real *s_SolSum, const uint tid ) { # define NGRID_M2 ( NGRID_LV0 - 2U ) const real dh2 = dh*dh; const real _dh2 = (real)-1.0/dh2; const uint di = 1U; const uint dj = NGRID_LV0; const uint dk = NGRID_LV0*NGRID_LV0; const uint FloorPow2 = 1<<(31-__clz(POT_NTHREAD) ); // largest power-of-two value not greater than POT_NTHREAD const uint Remain = POT_NTHREAD - FloorPow2; uint i, j, k, ip, jp, kp, im, jm, km, ijk; uint Idx = tid; s_Error [tid] = (real)0.0; s_SolSum[tid] = (real)0.0; // 1. calculate defect while ( Idx < NGRID_M2*NGRID_M2*NGRID_M2 ) { i = 1U + Idx%NGRID_M2; j = 1U + Idx%(NGRID_M2*NGRID_M2)/NGRID_M2; k = 1U + Idx/(NGRID_M2*NGRID_M2); ijk = __umul24( k, NGRID_LV0*NGRID_LV0 ) + __umul24( j, NGRID_LV0 ) + i; ip = ijk + di; jp = ijk + dj; kp = ijk + dk; im = ijk - di; jm = ijk - dj; km = ijk - dk; s_Error [tid] += FABS( _dh2*( Sol[kp]+Sol[km]+Sol[jp]+Sol[jm]+Sol[ip]+Sol[im]-(real)6.0*Sol[ijk] ) + RHS[ijk] ); s_SolSum[tid] += FABS( Sol[ijk] ); Idx += POT_NTHREAD; } // while ( Idx < NGRID_M2*NGRID_M2*NGRID_M2 ) __syncthreads(); // 2. perform the reduction operation to get the L1 error // first sum up the elements larger than FloorPow2 to ensure that the number of remaining elements is power-of-two if ( tid < Remain ) { s_Error [tid] += s_Error [ tid + FloorPow2 ]; s_SolSum[tid] += s_SolSum[ tid + FloorPow2 ]; } // parallel reduction # if ( POT_NTHREAD >= 1024 ) # error : ERROR : POT_NTHREAD must < 1024 !! # endif # if ( POT_NTHREAD >= 512 ) if ( tid < 256 ) { s_Error [tid] += s_Error [ tid + 256 ]; s_SolSum[tid] += s_SolSum[ tid + 256 ]; } __syncthreads(); # endif # if ( POT_NTHREAD >= 256 ) if ( tid < 128 ) { s_Error [tid] += s_Error [ tid + 128 ]; s_SolSum[tid] += s_SolSum[ tid + 128 ]; } __syncthreads(); # endif # if ( POT_NTHREAD >= 128 ) if ( tid < 64 ) { s_Error [tid] += s_Error [ tid + 64 ]; s_SolSum[tid] += s_SolSum[ tid + 64 ]; } __syncthreads(); # endif // adopting warp-synchronous mechanism if ( tid < 32 ) { // declare volatile pointer to ensure that the operations are not reordered volatile real *s_vErr = s_Error; volatile real *s_vSol = s_SolSum; s_vErr[tid] += s_vErr[tid+32]; // here we have assumed that POT_NTHREAD >= 64 s_vErr[tid] += s_vErr[tid+16]; s_vErr[tid] += s_vErr[tid+ 8]; s_vErr[tid] += s_vErr[tid+ 4]; s_vErr[tid] += s_vErr[tid+ 2]; s_vErr[tid] += s_vErr[tid+ 1]; s_vSol[tid] += s_vSol[tid+32]; s_vSol[tid] += s_vSol[tid+16]; s_vSol[tid] += s_vSol[tid+ 8]; s_vSol[tid] += s_vSol[tid+ 4]; s_vSol[tid] += s_vSol[tid+ 2]; s_vSol[tid] += s_vSol[tid+ 1]; s_vErr[tid] = dh2*s_vErr[tid]/s_vSol[tid]; } __syncthreads(); # undef NGRID_M2 } // FUNCTION : EstimateError #endif // #if ( defined GRAVITY && defined GPU && POT_SCHEME == MG )
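// Illustrative sketch (not part of the solver above): the Smoothing() routine implements
// red-black (odd-even) Gauss-Seidel with packed NGrid_2 index arithmetic. The minimal,
// unoptimized kernel below shows the same idea with a plain parity test, one interior cell
// per loop iteration. It assumes "real" is float and a launch with a single thread block
// per grid (mirroring the one-block-per-patch layout above), so __syncthreads() is a
// sufficient barrier between the two colors.
__global__ void Smoothing_Sketch( float *Sol, const float *RHS, const float dh, const int NGrid )
{
   const float dh2     = dh*dh;
   const float One_Six = 1.0f/6.0f;
   const int   NIn     = NGrid - 2;      // number of interior cells per direction

   for (int pass=0; pass<2; pass++)      // two passes, one "color" each
   {
      for (int Idx = blockIdx.x*blockDim.x + threadIdx.x; Idx < NIn*NIn*NIn; Idx += blockDim.x*gridDim.x)
      {
         const int i = 1 + Idx%NIn;
         const int j = 1 + Idx%(NIn*NIn)/NIn;
         const int k = 1 + Idx/(NIn*NIn);

         if ( ((i + j + k + pass) & 1) == 0 )   continue;   // update only one color per pass

         const int ijk = ( k*NGrid + j )*NGrid + i;
         Sol[ijk] = One_Six*(   Sol[ijk+1]           + Sol[ijk-1]
                              + Sol[ijk+NGrid]       + Sol[ijk-NGrid]
                              + Sol[ijk+NGrid*NGrid] + Sol[ijk-NGrid*NGrid]
                              - dh2*RHS[ijk] );
      }
      __syncthreads();   // the other color reads the values written in this pass
   }
}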
a7981cc05ae73e607c796e58ca6b9912f4640ea4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_hip.cuh" __device__ float3 body_body_interaction(const CUParticle pi, const CUParticle pj) { static const float D = 420; static const float epsilon = 47.0975; static const float M[4] = {1.9549e20, 7.4161e19}; static const float K[4] = {5.8228e14, 2.29114e14}; static const float KRP[4] = {0.02, 0.01}; static const float SDP[4] = {0.002, 0.001}; static const float G = 6.67408e-20; // These arbitrary scalings might not necessary if we use better // numerical techniques static const float weirdscale1 = 1e-16; static const float weirdscale2 = 1e-16; const auto diff = pj.pos - pi.pos; const auto next_diff = ((pj.pos + pj.velocity * 1e-5) - (pi.pos + pi.velocity * 1e-5)); const int ti = pi.type >= 2 ? pi.type - 2 : pi.type; const int tj = pj.type >= 2 ? pj.type - 2 : pj.type; // Iron has a larger shell, and would get penetrated first in the // ifs. Largest iron=0, smallest silicate=1. We can use these // instead of ti, tj. If they're different then these will be the // two different ones, if they're the same they will be equal to // ti=tj. const auto tlarge = min(ti, tj); const auto tsmall = max(ti, tj); const float r = fmax(length(diff), epsilon); const float next_r = length(next_diff); const auto dir = diff / r; // pre-computed values const float r2 = r * r; const float gmm = G * M[ti] * M[tj] * (1 / r2) * weirdscale1; const float dmr = (D * D - r2) * 0.5 * weirdscale2; float force = gmm; if (r < D) { float KRPlarge = next_r > r && r <= D * (1.0 - SDP[tlarge]) ? KRP[tlarge] : 1.0; float KRPsmall = next_r > r && r <= D * (1.0 - SDP[tsmall]) ? KRP[tsmall] : 1.0; force -= dmr * (K[tsmall] * KRPsmall + K[tlarge] * KRPlarge); } return dir * force; } __global__ void calculate_forces(const CUParticle *particles, float3 *forces, size_t n, float dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; // Want a local force to write to in each loop to save some global // bandwidth const CUParticle my_part = particles[i]; float3 force_acc = make_float3(0.0, 0.0, 0.0); extern __shared__ CUParticle sh_part[]; const size_t sync_size = blockDim.x; const size_t sync_points = n / sync_size; for (size_t sync = 0; sync < sync_points; ++sync) { // read global memory and put in sh_part instead. // put in some j corresponding to this threads idx. sh_part[threadIdx.x] = particles[sync * sync_size + threadIdx.x]; __syncthreads(); for (size_t j = 0; j < sync_size; ++j) { if (sync * sync_size + j == i) { continue; } force_acc += body_body_interaction(my_part, sh_part[j]); } __syncthreads(); } forces[i] = force_acc; } __global__ void apply_forces(CUParticle *particles, float3 *forces, size_t n, float dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; // We always update pos just after velocity to avoid another kernel. particles[i].velocity += forces[i] * dt; particles[i].pos += particles[i].velocity * dt; } __global__ void first_apply_forces(CUParticle *particles, float3 *forces, size_t n, float dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; // First leapfrog step v_{1/2}, need to update vel by only dt/2. // We always update pos just after velocity to avoid another kernel. 
particles[i].velocity += forces[i] * dt / 2; particles[i].pos += particles[i].velocity * dt; } void first_update(WorldState *world, float dt) { const auto N = world->particles.size(); const auto block_size = world->block_size; hipLaunchKernelGGL(( calculate_forces), dim3((N + block_size - 1) / block_size), dim3(block_size), block_size * sizeof(CUParticle), 0, world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(hipPeekAtLastError()); hipLaunchKernelGGL(( first_apply_forces), dim3((N + block_size - 1) / block_size), dim3(block_size), 0, 0, world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(hipPeekAtLastError()); hipLaunchKernelGGL(( update_GL), dim3((N + block_size - 1) / block_size), dim3(block_size), 0, 0, world->gpu.particles, world->gpu.glptr, N); CUDAERR(hipPeekAtLastError()); // Synchronize CUDA so that the timings are correct CUDAERR(hipDeviceSynchronize()); } void update(WorldState *world, float dt) { const auto N = world->particles.size(); const auto block_size = world->block_size; hipLaunchKernelGGL(( calculate_forces), dim3((N + block_size - 1) / block_size), dim3(block_size), block_size * sizeof(CUParticle), 0, world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(hipPeekAtLastError()); hipLaunchKernelGGL(( apply_forces), dim3((N + block_size - 1) / block_size), dim3(block_size), 0, 0, world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(hipPeekAtLastError()); hipLaunchKernelGGL(( update_GL), dim3((N + block_size - 1) / block_size), dim3(block_size), 0, 0, world->gpu.particles, world->gpu.glptr, N); CUDAERR(hipPeekAtLastError()); // Synchronize CUDA so that the timings are correct CUDAERR(hipDeviceSynchronize()); }
a7981cc05ae73e607c796e58ca6b9912f4640ea4.cu
#include "kernel.cuh" __device__ float3 body_body_interaction(const CUParticle pi, const CUParticle pj) { static const float D = 420; static const float epsilon = 47.0975; static const float M[4] = {1.9549e20, 7.4161e19}; static const float K[4] = {5.8228e14, 2.29114e14}; static const float KRP[4] = {0.02, 0.01}; static const float SDP[4] = {0.002, 0.001}; static const float G = 6.67408e-20; // These arbitrary scalings might not necessary if we use better // numerical techniques static const float weirdscale1 = 1e-16; static const float weirdscale2 = 1e-16; const auto diff = pj.pos - pi.pos; const auto next_diff = ((pj.pos + pj.velocity * 1e-5) - (pi.pos + pi.velocity * 1e-5)); const int ti = pi.type >= 2 ? pi.type - 2 : pi.type; const int tj = pj.type >= 2 ? pj.type - 2 : pj.type; // Iron has a larger shell, and would get penetrated first in the // ifs. Largest iron=0, smallest silicate=1. We can use these // instead of ti, tj. If they're different then these will be the // two different ones, if they're the same they will be equal to // ti=tj. const auto tlarge = min(ti, tj); const auto tsmall = max(ti, tj); const float r = fmax(length(diff), epsilon); const float next_r = length(next_diff); const auto dir = diff / r; // pre-computed values const float r2 = r * r; const float gmm = G * M[ti] * M[tj] * (1 / r2) * weirdscale1; const float dmr = (D * D - r2) * 0.5 * weirdscale2; float force = gmm; if (r < D) { float KRPlarge = next_r > r && r <= D * (1.0 - SDP[tlarge]) ? KRP[tlarge] : 1.0; float KRPsmall = next_r > r && r <= D * (1.0 - SDP[tsmall]) ? KRP[tsmall] : 1.0; force -= dmr * (K[tsmall] * KRPsmall + K[tlarge] * KRPlarge); } return dir * force; } __global__ void calculate_forces(const CUParticle *particles, float3 *forces, size_t n, float dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; // Want a local force to write to in each loop to save some global // bandwidth const CUParticle my_part = particles[i]; float3 force_acc = make_float3(0.0, 0.0, 0.0); extern __shared__ CUParticle sh_part[]; const size_t sync_size = blockDim.x; const size_t sync_points = n / sync_size; for (size_t sync = 0; sync < sync_points; ++sync) { // read global memory and put in sh_part instead. // put in some j corresponding to this threads idx. sh_part[threadIdx.x] = particles[sync * sync_size + threadIdx.x]; __syncthreads(); for (size_t j = 0; j < sync_size; ++j) { if (sync * sync_size + j == i) { continue; } force_acc += body_body_interaction(my_part, sh_part[j]); } __syncthreads(); } forces[i] = force_acc; } __global__ void apply_forces(CUParticle *particles, float3 *forces, size_t n, float dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; // We always update pos just after velocity to avoid another kernel. particles[i].velocity += forces[i] * dt; particles[i].pos += particles[i].velocity * dt; } __global__ void first_apply_forces(CUParticle *particles, float3 *forces, size_t n, float dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; // First leapfrog step v_{1/2}, need to update vel by only dt/2. // We always update pos just after velocity to avoid another kernel. 
particles[i].velocity += forces[i] * dt / 2; particles[i].pos += particles[i].velocity * dt; } void first_update(WorldState *world, float dt) { const auto N = world->particles.size(); const auto block_size = world->block_size; calculate_forces<<<(N + block_size - 1) / block_size, block_size, block_size * sizeof(CUParticle)>>>( world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(cudaPeekAtLastError()); first_apply_forces<<<(N + block_size - 1) / block_size, block_size>>>( world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(cudaPeekAtLastError()); update_GL<<<(N + block_size - 1) / block_size, block_size>>>( world->gpu.particles, world->gpu.glptr, N); CUDAERR(cudaPeekAtLastError()); // Synchronize CUDA so that the timings are correct CUDAERR(cudaDeviceSynchronize()); } void update(WorldState *world, float dt) { const auto N = world->particles.size(); const auto block_size = world->block_size; calculate_forces<<<(N + block_size - 1) / block_size, block_size, block_size * sizeof(CUParticle)>>>( world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(cudaPeekAtLastError()); apply_forces<<<(N + block_size - 1) / block_size, block_size>>>( world->gpu.particles, world->gpu.velocities, N, dt); CUDAERR(cudaPeekAtLastError()); update_GL<<<(N + block_size - 1) / block_size, block_size>>>( world->gpu.particles, world->gpu.glptr, N); CUDAERR(cudaPeekAtLastError()); // Synchronize CUDA so that the timings are correct CUDAERR(cudaDeviceSynchronize()); }
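// Illustrative driver loop (a sketch; nsteps and the WorldState setup are assumed to come
// from the surrounding application). The kick-drift leapfrog above requires first_update()
// to be called exactly once so the velocities sit at the half step v_{1/2}; every later
// step then advances a full dt with update().
static void run_leapfrog(WorldState *world, float dt, int nsteps)
{
  first_update(world, dt);          // half kick (dt/2) + first drift
  for (int step = 1; step < nsteps; ++step)
    update(world, dt);              // full kick + drift per step
}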
f9501ba1741a47f7778e2d197c7c0793ef802f2c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <iostream> #include <hip/hip_runtime.h> #include <string.h> /* * compile: nvcc .\vecadd.cu -o vecadd * run: ./vecadd <int: size of the vector> <int: block size> */ int *a, *b; // host data int *c, *c2; // results int sample_size = 10; double time_d = 0; double time_h = 0; int n; // size of the vector __global__ void vecAdd(int *A,int *B,int *C,int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < N) C[i] = A[i] + B[i]; } void vecAdd_h(int *A1,int *B1, int *C1, int N) { for(int i=0;i<N;i++) C1[i] = A1[i] + B1[i]; } int main(int argc,char **argv) { printf("Begin \n"); n = strtol(argv[1], NULL, 10); int nBytes = n*sizeof(int); int block_size, block_no; a = (int *)malloc(nBytes); b = (int *)malloc(nBytes); c = (int *)malloc(nBytes); c2 = (int *)malloc(nBytes); int *a_d,*b_d,*c_d; block_size = strtol(argv[2], NULL, 10); block_no = ceil(n/block_size); dim3 dimBlock(block_size,1,1); dim3 dimGrid(block_no,1,1); for(int i = 0; i < n; i++ ) { a[i] = sin(i)*sin(i); b[i] = cos(i)*cos(i); } for(int _ = 0; _ < sample_size; _ += 1) { printf("Allocating device memory on host..\n"); hipMalloc((void **)&a_d,n*sizeof(int)); hipMalloc((void **)&b_d,n*sizeof(int)); hipMalloc((void **)&c_d,n*sizeof(int)); printf("Copying to device..\n"); hipMemcpy(a_d,a,n*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(b_d,b,n*sizeof(int),hipMemcpyHostToDevice); clock_t start_d=clock(); printf("Doing GPU Vector add\n"); hipLaunchKernelGGL(( vecAdd), dim3(dimGrid), dim3(dimBlock), 0, 0, a_d,b_d,c_d,n); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) ); exit(-1); } clock_t end_d = clock(); clock_t start_h = clock(); printf("Doing CPU Vector add\n"); vecAdd_h(a,b,c2,n); clock_t end_h = clock(); time_d += (double)(end_d-start_d)/CLOCKS_PER_SEC; time_h += (double)(end_h-start_h)/CLOCKS_PER_SEC; hipMemcpy(c,c_d,n*sizeof(int),hipMemcpyDeviceToHost); // for(int i = 0; i < n; i += 1) // { // printf("%d : %d\n",i, c[i]); // } printf("compare results code : %d\n",memcmp(c, c2, n*sizeof(int))); hipFree(a_d); hipFree(b_d); hipFree(c_d); } printf("Number of elements: %d GPU Time: %f CPU Time: %f\n", n, time_d/sample_size, time_h/sample_size); return 0; }
f9501ba1741a47f7778e2d197c7c0793ef802f2c.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <iostream> #include <cuda.h> #include <string.h> /* * compile: nvcc .\vecadd.cu -o vecadd * run: ./vecadd <int: size of the vector> <int: block size> */ int *a, *b; // host data int *c, *c2; // results int sample_size = 10; double time_d = 0; double time_h = 0; int n; // size of the vector __global__ void vecAdd(int *A,int *B,int *C,int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < N) C[i] = A[i] + B[i]; } void vecAdd_h(int *A1,int *B1, int *C1, int N) { for(int i=0;i<N;i++) C1[i] = A1[i] + B1[i]; } int main(int argc,char **argv) { printf("Begin \n"); n = strtol(argv[1], NULL, 10); int nBytes = n*sizeof(int); int block_size, block_no; a = (int *)malloc(nBytes); b = (int *)malloc(nBytes); c = (int *)malloc(nBytes); c2 = (int *)malloc(nBytes); int *a_d,*b_d,*c_d; block_size = strtol(argv[2], NULL, 10); block_no = ceil(n/block_size); dim3 dimBlock(block_size,1,1); dim3 dimGrid(block_no,1,1); for(int i = 0; i < n; i++ ) { a[i] = sin(i)*sin(i); b[i] = cos(i)*cos(i); } for(int _ = 0; _ < sample_size; _ += 1) { printf("Allocating device memory on host..\n"); cudaMalloc((void **)&a_d,n*sizeof(int)); cudaMalloc((void **)&b_d,n*sizeof(int)); cudaMalloc((void **)&c_d,n*sizeof(int)); printf("Copying to device..\n"); cudaMemcpy(a_d,a,n*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(b_d,b,n*sizeof(int),cudaMemcpyHostToDevice); clock_t start_d=clock(); printf("Doing GPU Vector add\n"); vecAdd<<<dimGrid, dimBlock>>>(a_d,b_d,c_d,n); cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) ); exit(-1); } clock_t end_d = clock(); clock_t start_h = clock(); printf("Doing CPU Vector add\n"); vecAdd_h(a,b,c2,n); clock_t end_h = clock(); time_d += (double)(end_d-start_d)/CLOCKS_PER_SEC; time_h += (double)(end_h-start_h)/CLOCKS_PER_SEC; cudaMemcpy(c,c_d,n*sizeof(int),cudaMemcpyDeviceToHost); // for(int i = 0; i < n; i += 1) // { // printf("%d : %d\n",i, c[i]); // } printf("compare results code : %d\n",memcmp(c, c2, n*sizeof(int))); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); } printf("Number of elements: %d GPU Time: %f CPU Time: %f\n", n, time_d/sample_size, time_h/sample_size); return 0; }
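// Note on the launch geometry above, with a minimal sketch: because n and block_size are
// both int, the expression ceil(n/block_size) truncates in integer division before ceil()
// ever runs, so the grid can come up one block short whenever n is not a multiple of
// block_size and the tail elements are never processed. The usual integer round-up avoids
// the float round trip entirely:
static inline int blocks_for(int n, int block_size)
{
  return (n + block_size - 1) / block_size;   // integer ceil(n / block_size)
}
// e.g. dim3 dimGrid(blocks_for(n, block_size), 1, 1);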
19752c628a0379e43cb94f6d4a73443337be741f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <rocblas.h> __device__ float merge(float old,float opOutput,float *extraParams) { return fmaxf(old,opOutput); } __device__ float update(float old,float opOutput,float *extraParams) { return fmaxf(old,opOutput); } __device__ float op(float d1,float *extraParams) { return d1; } __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return reduction; } /** Perform a reduction @param n the number of elements @param xOffset the starting offset @param dx the data to perform the reduction on @param incx the increment on which to perform the reduction @param extraParams extra parameters used for calculations @param result where to store the result of the reduction */ __device__ void transform(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { extern __shared__ float sPartials[]; extern __shared__ int indexes[]; int tid = threadIdx.x; int totalThreads = gridDim.x * blockDim.x; int start = blockDim.x * blockIdx.x + tid; if(start >= n) return; float sum = extraParams[0]; int index = start; for ( int i = start; i < n; i += totalThreads) { float curr = dx[i * incx]; if(curr > sum) { index = i * incx; } sum = update(sum,op(curr,extraParams),extraParams); sPartials[tid] = sum; } sPartials[tid] = sum; indexes[tid] = index; __syncthreads(); // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. int floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while ( floorPow2 & (floorPow2 - 1) ) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { float sPartialBack = sPartials[tid - floorPow2]; float currTid = sPartials[tid]; indexes[tid - floorPow2] = indexes[tid]; sPartials[tid - floorPow2] = merge(sPartialBack,currTid,extraParams); } __syncthreads(); } for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) { if (tid < activeThreads) { indexes[tid] = indexes[tid + activeThreads]; sPartials[tid] = merge(sPartials[tid],sPartials[tid + activeThreads],extraParams); } __syncthreads(); } if(tid == 0) { result[blockIdx.x] = postProcess(sPartials[0],n,xOffset,dx,incx,extraParams,result); float val5 = indexes[blockIdx.x]; result[blockIdx.x] = indexes[blockIdx.x] / incx; } } extern "C" __global__ void iamax_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
19752c628a0379e43cb94f6d4a73443337be741f.cu
extern "C" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <cublas_v2.h> __device__ float merge(float old,float opOutput,float *extraParams) { return fmaxf(old,opOutput); } __device__ float update(float old,float opOutput,float *extraParams) { return fmaxf(old,opOutput); } __device__ float op(float d1,float *extraParams) { return d1; } __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return reduction; } /** Perform a reduction @param n the number of elements @param xOffset the starting offset @param dx the data to perform the reduction on @param incx the increment on which to perform the reduction @param extraParams extra parameters used for calculations @param result where to store the result of the reduction */ __device__ void transform(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { extern __shared__ float sPartials[]; extern __shared__ int indexes[]; int tid = threadIdx.x; int totalThreads = gridDim.x * blockDim.x; int start = blockDim.x * blockIdx.x + tid; if(start >= n) return; float sum = extraParams[0]; int index = start; for ( int i = start; i < n; i += totalThreads) { float curr = dx[i * incx]; if(curr > sum) { index = i * incx; } sum = update(sum,op(curr,extraParams),extraParams); sPartials[tid] = sum; } sPartials[tid] = sum; indexes[tid] = index; __syncthreads(); // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. int floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while ( floorPow2 & (floorPow2 - 1) ) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { float sPartialBack = sPartials[tid - floorPow2]; float currTid = sPartials[tid]; indexes[tid - floorPow2] = indexes[tid]; sPartials[tid - floorPow2] = merge(sPartialBack,currTid,extraParams); } __syncthreads(); } for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) { if (tid < activeThreads) { indexes[tid] = indexes[tid + activeThreads]; sPartials[tid] = merge(sPartials[tid],sPartials[tid + activeThreads],extraParams); } __syncthreads(); } if(tid == 0) { result[blockIdx.x] = postProcess(sPartials[0],n,xOffset,dx,incx,extraParams,result); float val5 = indexes[blockIdx.x]; result[blockIdx.x] = indexes[blockIdx.x] / incx; } } extern "C" __global__ void iamax_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
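// A caution plus a minimal sketch (not part of the original kernel): in CUDA every
// `extern __shared__` declaration inside one kernel starts at the same base address, so
// sPartials and indexes above alias the same dynamic shared-memory buffer. A common
// alternative is to declare a single buffer and partition it by hand, as sketched below;
// the launch then supplies blockDim.x * (sizeof(float) + sizeof(int)) bytes of dynamic
// shared memory. The kernel name and the trivial fill are placeholders for illustration.
__global__ void partitioned_shared_sketch( const float *dx, int n )
{
   extern __shared__ unsigned char smem[];
   float *sPartials = reinterpret_cast<float *>( smem );                   // blockDim.x floats
   int   *sIndexes  = reinterpret_cast<int *>( sPartials + blockDim.x );   // blockDim.x ints

   const int tid  = threadIdx.x;
   sPartials[tid] = ( tid < n ) ? dx[tid] : 0.0f;
   sIndexes [tid] = tid;
   __syncthreads();
   // ... reduce over sPartials/sIndexes exactly as iamax_strided_float does above ...
}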
76b3257d1299e89d5c13477c9ef2f6106fbec719.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; }
76b3257d1299e89d5c13477c9ef2f6106fbec719.cu
#include "includes.h" extern "C" { } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; }
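// Illustrative launch helper (a sketch; the 512-thread block size and the 65535 fold
// threshold are assumptions aimed at the gridDim.x limit of older devices). The index
// math above flattens blockIdx.y back into a 1D element index, which lets callers split
// a very large block count across gridDim.x and gridDim.y:
static dim3 grid_for( size_t n, int block = 512 )
{
   size_t blocks = ( n + block - 1 ) / block;
   size_t x = blocks, y = 1;
   while ( x > 65535 ) {        // fold excess blocks into the y dimension
      x = ( x + 1 ) / 2;
      y *= 2;
   }
   return dim3( (unsigned)x, (unsigned)y, 1 );   // x*y >= blocks; the i < N guard handles the overshoot
}
// e.g. axpy_kernel<<< grid_for(N), 512 >>>( N, ALPHA, X, 0, 1, Y, 0, 1 );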
58e824982a4a9471ec765982a11f025455005c33.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<vector> #include"Geometry.h" #include"PathTracing.cuh" //__global__ void KernelInit() //{ // //} void PathTracer::Init() { std::cout << "Path Tracer Initialized" << std::endl; }
58e824982a4a9471ec765982a11f025455005c33.cu
#include<iostream> #include<vector> #include"Geometry.h" #include"PathTracing.cuh" //__global__ void KernelInit() //{ // //} void PathTracer::Init() { std::cout << "Path Tracer Initialized" << std::endl; }
17e81836c9277a25e258cad6fb58051a608529f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> ///////////per request timing. L1 enabled. P100. ///////////For the second iteration, after data size 1gb, L2 tlb misses sparsely appear. ///////////After data size 512MB, L1 tlb misses sparsely appear. ///////////For the first iteration, managed memory migrates pages on demand. ///////////After the migration, L1 and L2 tlbs of the page will be filled, L2 cache will also be prefetched. ///////////1700s and 1900s are coincidence, but 1600s is not. //typedef unsigned char byte; void init_cpu_data(int* A, long long int size, int stride, long long int mod){ for (long long int i = 0; i < size; i = i + stride){ A[i]=(i + stride) % mod; } for (long long int i = 32; i < size; i = i + stride){ A[i]=(i + stride) % mod; } } __device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines for (int it = 0; it < iterations; it++){ j = A[j]; } B[0] = j; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines //long long int start_time = 0;//////clock //long long int end_time = 0;//////clock //start_time = clock64();//////clock for (int it = 0; it < iterations; it++){ j = A[j]; } //end_time=clock64();//////clock //long long int total_time = end_time - start_time;//////clock //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! ( B[0] = j; //B[1] = (int) total_time; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside? //////shared memory: 0xc000 max (49152 Bytes = 48KB) __shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations. __shared__ int s_index[1024 * 4]; //__shared__ int s_index[1]; int j = starting_index;/////make them in the same page, and miss near in cache lines //int j = B[0]; long long int start_time = 0;//////clock long long int end_time = 0;//////clock long long int time_interval = 0;//////clock //long long int total_time = end_time - start_time;//////clock /* for (int it = 0; it < iterations; it++){ start_time = clock64();//////clock j = A[j]; //s_index[it] = j; end_time=clock64();//////clock s_tvalue[it] = end_time - start_time; } */ asm(".reg .u64 t1;\n\t" ".reg .u64 t2;\n\t"); long long int counter = 0; for (long long int it = 0; it < iterations; it++){ /* asm("mul.wide.u32 t1, %3, %5;\n\t" "add.u64 t2, t1, %4;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %2, [t2];\n\t" "mov.u64 %1, %clock64;" : "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); */ asm("mul.wide.u32 t1, %2, %4;\n\t" "add.u64 t2, t1, %3;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %1, [t2];\n\t" : "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); s_index[0] = j;////what if without this? 
///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add). asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time)); time_interval = end_time - start_time; if(time_interval > 300000){ s_tvalue[counter] = time_interval; counter++; } } //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency B[0] = j; for (long long int it = 0; it < 4096; it++){ C[it] = s_index[it]; D[it] = s_tvalue[it]; } } __global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){ long long int reduced_iter = iterations; if(reduced_iter > 2048){ reduced_iter = 2048; }else if(reduced_iter < 16){ reduced_iter = 16; } ///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory. //P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2 P_chasing2(0, A, iterations, B, C, D, 0, clock_rate, data_stride);////////partially print the data __syncthreads(); } int main(int argc, char **argv) { printf("\n"); // set device hipDeviceProp_t device_prop; //int dev_id = findCudaDevice(argc, (const char **) argv); int dev_id = 0; checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id)); int peak_clk = 1;//kHz checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id)); float clock_rate = (float) peak_clk; //printf("clock_rate_out_kernel:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == hipComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } if (device_prop.concurrentManagedAccess == 1){ printf("This device supports concurrent Managed Access.\n"); }else{ printf("This device does not support concurrent Managed Access.\n"); } int value1 = 1; checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id)); printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1); ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out; checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2)); FILE * pFile; pFile = fopen ("output.txt","w"); int counter = 0; /////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb) for(int data_stride = 1 * 1 * 256; data_stride <= 1 * 1 * 256; data_stride = data_stride * 2){/////////32mb stride //data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit. //printf("###################data_stride%d#########################\n", data_stride); //for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines. for(long long int mod2 = 1073741824; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. 
long long int mod = mod2; if(mod > 2684354560){ mod = 2684354560; } long long int data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 int *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory init_cpu_data(CPU_data_in, data_size, data_stride, mod); long long int reduced_iter = iterations; if(reduced_iter > 4096){ reduced_iter = 4096; }else if(reduced_iter < 16){ reduced_iter = 16; } int *CPU_data_out_index; CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size)); //hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out_index; checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here hipDeviceSynchronize(); hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, hipMemcpyDeviceToHost); hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(hipFree(GPU_data_out_index)); checkCudaErrors(hipFree(GPU_data_out_time)); //checkCudaErrors(hipFree(GPU_data_in)); checkCudaErrors(hipFree(CPU_data_in)); //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } //printf("############################################\n\n"); } checkCudaErrors(hipFree(GPU_data_out)); //free(CPU_data_out); fclose (pFile); exit(EXIT_SUCCESS); }
17e81836c9277a25e258cad6fb58051a608529f7.cu
#include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> ///////////per request timing. L1 enabled. P100. ///////////For the second iteration, after data size 1gb, L2 tlb misses sparsely appear. ///////////After data size 512MB, L1 tlb misses sparsely appear. ///////////For the first iteration, managed memory migrates pages on demand. ///////////After the migration, L1 and L2 tlbs of the page will be filled, L2 cache will also be prefetched. ///////////1700s and 1900s are coincidence, but 1600s is not. //typedef unsigned char byte; void init_cpu_data(int* A, long long int size, int stride, long long int mod){ for (long long int i = 0; i < size; i = i + stride){ A[i]=(i + stride) % mod; } for (long long int i = 32; i < size; i = i + stride){ A[i]=(i + stride) % mod; } } __device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines for (int it = 0; it < iterations; it++){ j = A[j]; } B[0] = j; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines //long long int start_time = 0;//////clock //long long int end_time = 0;//////clock //start_time = clock64();//////clock for (int it = 0; it < iterations; it++){ j = A[j]; } //end_time=clock64();//////clock //long long int total_time = end_time - start_time;//////clock //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! ( B[0] = j; //B[1] = (int) total_time; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside? //////shared memory: 0xc000 max (49152 Bytes = 48KB) __shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations. __shared__ int s_index[1024 * 4]; //__shared__ int s_index[1]; int j = starting_index;/////make them in the same page, and miss near in cache lines //int j = B[0]; long long int start_time = 0;//////clock long long int end_time = 0;//////clock long long int time_interval = 0;//////clock //long long int total_time = end_time - start_time;//////clock /* for (int it = 0; it < iterations; it++){ start_time = clock64();//////clock j = A[j]; //s_index[it] = j; end_time=clock64();//////clock s_tvalue[it] = end_time - start_time; } */ asm(".reg .u64 t1;\n\t" ".reg .u64 t2;\n\t"); long long int counter = 0; for (long long int it = 0; it < iterations; it++){ /* asm("mul.wide.u32 t1, %3, %5;\n\t" "add.u64 t2, t1, %4;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %2, [t2];\n\t" "mov.u64 %1, %clock64;" : "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); */ asm("mul.wide.u32 t1, %2, %4;\n\t" "add.u64 t2, t1, %3;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %1, [t2];\n\t" : "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); s_index[0] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. 
(another way is to use average time, but inevitably containing other instructions:setp, add). asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time)); time_interval = end_time - start_time; if(time_interval > 300000){ s_tvalue[counter] = time_interval; counter++; } } //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency B[0] = j; for (long long int it = 0; it < 4096; it++){ C[it] = s_index[it]; D[it] = s_tvalue[it]; } } __global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){ long long int reduced_iter = iterations; if(reduced_iter > 2048){ reduced_iter = 2048; }else if(reduced_iter < 16){ reduced_iter = 16; } ///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory. //P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2 P_chasing2(0, A, iterations, B, C, D, 0, clock_rate, data_stride);////////partially print the data __syncthreads(); } int main(int argc, char **argv) { printf("\n"); // set device cudaDeviceProp device_prop; //int dev_id = findCudaDevice(argc, (const char **) argv); int dev_id = 0; checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id)); int peak_clk = 1;//kHz checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id)); float clock_rate = (float) peak_clk; //printf("clock_rate_out_kernel:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == cudaComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } if (device_prop.concurrentManagedAccess == 1){ printf("This device supports concurrent Managed Access.\n"); }else{ printf("This device does not support concurrent Managed Access.\n"); } int value1 = 1; checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id)); printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1); ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out; checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2)); FILE * pFile; pFile = fopen ("output.txt","w"); int counter = 0; /////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb) for(int data_stride = 1 * 1 * 256; data_stride <= 1 * 1 * 256; data_stride = data_stride * 2){/////////32mb stride //data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit. //printf("###################data_stride%d#########################\n", data_stride); //for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines. for(long long int mod2 = 1073741824; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. 
long long int mod = mod2; if(mod > 2684354560){ mod = 2684354560; } long long int data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 int *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory init_cpu_data(CPU_data_in, data_size, data_stride, mod); long long int reduced_iter = iterations; if(reduced_iter > 4096){ reduced_iter = 4096; }else if(reduced_iter < 16){ reduced_iter = 16; } int *CPU_data_out_index; CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size)); //cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out int *GPU_data_out_index; checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here cudaDeviceSynchronize(); cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, cudaMemcpyDeviceToHost); cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(cudaFree(GPU_data_out_index)); checkCudaErrors(cudaFree(GPU_data_out_time)); //checkCudaErrors(cudaFree(GPU_data_in)); checkCudaErrors(cudaFree(CPU_data_in)); //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } //printf("############################################\n\n"); } checkCudaErrors(cudaFree(GPU_data_out)); //free(CPU_data_out); fclose (pFile); exit(EXIT_SUCCESS); }
a8e5b517368d2c46cd29c935933639be4a178c14.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/random.h> #include <thrust/transform.h> #include <string> #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/dropout_op.h" #include "paddle/fluid/platform/dynload/hiprand/hiprand.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, typename MaskType> __global__ void RandomGenerator(const size_t n, const int seed, const float dropout_prob, const T* src, MaskType* mask_data, T* dst, bool is_upscale_in_train) { hiprandStatePhilox4_32_10_t state; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; MaskType mask; T dest; for (; idx < n; idx += blockDim.x * gridDim.x) { T s = src[idx]; if (step_size == 0) { hiprand_init(seed, idx, idx, &state); step_size = blockDim.x * gridDim.x; } else { hiprand_init(seed, idx, step_size, &state); } if (hiprand_uniform(&state) < dropout_prob) { mask = 0; dest = 0; } else { mask = 1; if (is_upscale_in_train) { dest = s / static_cast<T>(1.0f - dropout_prob); } else { dest = s; } } mask_data[idx] = mask; dst[idx] = dest; } } template <typename T, typename MaskType> __global__ void RandomGeneratorWithSeed(const size_t n, const int* seed, const float dropout_prob, const T* src, MaskType* mask_data, T* dst, bool is_upscale_in_train) { hiprandStatePhilox4_32_10_t state; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; MaskType mask; T dest; for (; idx < n; idx += blockDim.x * gridDim.x) { T s = src[idx]; if (step_size == 0) { hiprand_init(seed[0], idx, idx, &state); step_size = blockDim.x * gridDim.x; } else { hiprand_init(seed[0], idx, step_size, &state); } if (hiprand_uniform(&state) < dropout_prob) { mask = 0; dest = 0; } else { mask = 1; if (is_upscale_in_train) { dest = s / static_cast<T>(1.0f - dropout_prob); } else { dest = s; } } mask_data[idx] = mask; dst[idx] = dest; } } template <typename T, typename MaskType> __global__ void RandomGeneratorWithGenerator(const size_t n, uint64_t seed, const float dropout_prob, const T* src, MaskType* mask_data, T* dst, bool is_upscale_in_train, uint64_t increment) { hiprandStatePhilox4_32_10_t state; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; MaskType mask; T dest; for (; idx < n; idx += blockDim.x * gridDim.x) { T s = src[idx]; if (step_size == 0) { hiprand_init(seed, idx, increment, &state); step_size = blockDim.x * gridDim.x; } else { hiprand_init(seed, idx, increment, &state); } if (hiprand_uniform(&state) < dropout_prob) { mask = 0; dest = 0; } else { mask = 1; if (is_upscale_in_train) { dest = s / static_cast<T>(1.0f - dropout_prob); } else { dest = s; } } mask_data[idx] = mask; dst[idx] = dest; } } // It seems that Eigen::Tensor::setRandom in GPU will SEGFAULT. 
// Use std::random and thrust::random(thrust is a std library in CUDA) to // implement uniform random. template <typename Place, typename T> class GPUDropoutKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* seed = context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr; auto* y = context.Output<Tensor>("Out"); y->mutable_data<T>(context.GetPlace()); float dropout_prob = context.Attr<float>("dropout_prob"); auto& dropout_implementation = context.Attr<std::string>("dropout_implementation"); bool upscale_in_train = (dropout_implementation == "upscale_in_train"); auto& place = *context.template device_context<Place>().eigen_device(); if (!context.Attr<bool>("is_test")) { int64_t x_numel = x->numel(); auto stream = context.cuda_device_context().stream(); auto* mask = context.Output<Tensor>("Mask"); auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace()); size_t size = framework::product(mask->dims()); auto* x_data = x->data<T>(); auto* y_data = y->mutable_data<T>(context.GetPlace()); if (dropout_prob == 1.0f) { PADDLE_ENFORCE_CUDA_SUCCESS( hipMemsetAsync(y_data, 0, x_numel * sizeof(T), stream)); PADDLE_ENFORCE_CUDA_SUCCESS(hipMemsetAsync( mask_data, 0, x_numel * sizeof(*mask_data), stream)); return; } int threads = 512; int grid = (x_numel + threads - 1) / threads; if (seed && platform::is_gpu_place(seed->place())) { auto seed_gpu_data = seed->data<int>(); hipLaunchKernelGGL(( RandomGeneratorWithSeed<T, uint8_t>), dim3(grid), dim3(threads), 0, stream, size, seed_gpu_data, dropout_prob, x_data, mask_data, y_data, upscale_in_train); return; } int seed_data; std::random_device rnd; if (seed) { seed_data = *(seed->data<int>()); } else { seed_data = context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd(); } int device_id = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace()) .GetDeviceId(); auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id); if (gen_cuda->GetIsInitPy() && (!context.Attr<bool>("fix_seed"))) { auto seed_offset = gen_cuda->IncrementOffset(1); hipLaunchKernelGGL(( RandomGeneratorWithGenerator<T, uint8_t>), dim3(grid), dim3(threads), 0, stream, size, seed_offset.first, dropout_prob, x_data, mask_data, y_data, upscale_in_train, seed_offset.second); return; } hipLaunchKernelGGL(( RandomGenerator<T, uint8_t>), dim3(grid), dim3(threads), 0, stream, size, seed_data, dropout_prob, x_data, mask_data, y_data, upscale_in_train); } else { auto X = EigenMatrix<T>::Reshape(*x, 1); auto Y = EigenMatrix<T>::Reshape(*y, 1); if (upscale_in_train) { Y.device(place) = X; } else { Y.device(place) = X * static_cast<T>(1.0f - dropout_prob); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( dropout, ops::GPUDropoutKernel<plat::CUDADeviceContext, float>, ops::GPUDropoutKernel<plat::CUDADeviceContext, plat::float16>, ops::GPUDropoutKernel<plat::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( dropout_grad, ops::DropoutGradKernel<plat::CUDADeviceContext, float>, ops::DropoutGradKernel<plat::CUDADeviceContext, plat::float16>, ops::DropoutGradKernel<plat::CUDADeviceContext, double>);
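The three RNG kernels in the hipified dropout file above differ only in where the seed comes from (an int attribute, a seed tensor resident on the device, or the framework generator with a precomputed offset); all of them draw from a per-element Philox stream. The sketch below uses hypothetical names (bernoulli_mask, keep_prob) and the CUDA curand spellings that the hiprand calls above map onto one-for-one; it is a minimal illustration of the core pattern, not the Paddle kernel: a grid-stride loop in which the element index is used as the Philox subsequence, so the mask is reproducible for a fixed seed regardless of launch configuration.

#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// Minimal sketch (not the Paddle kernel): one Philox stream per element.
__global__ void bernoulli_mask(size_t n, unsigned long long seed,
                               float keep_prob, unsigned char* mask) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += (size_t)blockDim.x * gridDim.x) {
    curandStatePhilox4_32_10_t state;
    // curand_init(seed, subsequence, offset, &state): using the element index
    // as the subsequence gives each element an independent stream, so the
    // draw does not depend on the grid/block configuration.
    curand_init(seed, i, 0, &state);
    mask[i] = (curand_uniform(&state) < keep_prob) ? 1 : 0;  // 1 = kept
  }
}

int main() {
  const size_t n = 1024;
  unsigned char* d_mask;
  cudaMalloc(&d_mask, n);
  bernoulli_mask<<<4, 256>>>(n, 1234ULL, 0.7f, d_mask);
  unsigned char h_mask[1024];
  cudaMemcpy(h_mask, d_mask, n, cudaMemcpyDeviceToHost);
  size_t kept = 0;
  for (size_t i = 0; i < n; ++i) kept += h_mask[i];
  printf("kept %zu of %zu (~70%% expected)\n", kept, n);
  cudaFree(d_mask);
  return 0;
}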
a8e5b517368d2c46cd29c935933639be4a178c14.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda.h> #include <curand_kernel.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/random.h> #include <thrust/transform.h> #include <string> #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/dropout_op.h" #include "paddle/fluid/platform/dynload/curand.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, typename MaskType> __global__ void RandomGenerator(const size_t n, const int seed, const float dropout_prob, const T* src, MaskType* mask_data, T* dst, bool is_upscale_in_train) { curandStatePhilox4_32_10_t state; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; MaskType mask; T dest; for (; idx < n; idx += blockDim.x * gridDim.x) { T s = src[idx]; if (step_size == 0) { curand_init(seed, idx, idx, &state); step_size = blockDim.x * gridDim.x; } else { curand_init(seed, idx, step_size, &state); } if (curand_uniform(&state) < dropout_prob) { mask = 0; dest = 0; } else { mask = 1; if (is_upscale_in_train) { dest = s / static_cast<T>(1.0f - dropout_prob); } else { dest = s; } } mask_data[idx] = mask; dst[idx] = dest; } } template <typename T, typename MaskType> __global__ void RandomGeneratorWithSeed(const size_t n, const int* seed, const float dropout_prob, const T* src, MaskType* mask_data, T* dst, bool is_upscale_in_train) { curandStatePhilox4_32_10_t state; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; MaskType mask; T dest; for (; idx < n; idx += blockDim.x * gridDim.x) { T s = src[idx]; if (step_size == 0) { curand_init(seed[0], idx, idx, &state); step_size = blockDim.x * gridDim.x; } else { curand_init(seed[0], idx, step_size, &state); } if (curand_uniform(&state) < dropout_prob) { mask = 0; dest = 0; } else { mask = 1; if (is_upscale_in_train) { dest = s / static_cast<T>(1.0f - dropout_prob); } else { dest = s; } } mask_data[idx] = mask; dst[idx] = dest; } } template <typename T, typename MaskType> __global__ void RandomGeneratorWithGenerator(const size_t n, uint64_t seed, const float dropout_prob, const T* src, MaskType* mask_data, T* dst, bool is_upscale_in_train, uint64_t increment) { curandStatePhilox4_32_10_t state; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; MaskType mask; T dest; for (; idx < n; idx += blockDim.x * gridDim.x) { T s = src[idx]; if (step_size == 0) { curand_init(seed, idx, increment, &state); step_size = blockDim.x * gridDim.x; } else { curand_init(seed, idx, increment, &state); } if (curand_uniform(&state) < dropout_prob) { mask = 0; dest = 0; } else { mask = 1; if (is_upscale_in_train) { dest = s / static_cast<T>(1.0f - dropout_prob); } else { dest = s; } } mask_data[idx] = mask; dst[idx] = dest; } } // It seems that Eigen::Tensor::setRandom in GPU will SEGFAULT. // Use std::random and thrust::random(thrust is a std library in CUDA) to // implement uniform random. 
template <typename Place, typename T> class GPUDropoutKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* seed = context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr; auto* y = context.Output<Tensor>("Out"); y->mutable_data<T>(context.GetPlace()); float dropout_prob = context.Attr<float>("dropout_prob"); auto& dropout_implementation = context.Attr<std::string>("dropout_implementation"); bool upscale_in_train = (dropout_implementation == "upscale_in_train"); auto& place = *context.template device_context<Place>().eigen_device(); if (!context.Attr<bool>("is_test")) { int64_t x_numel = x->numel(); auto stream = context.cuda_device_context().stream(); auto* mask = context.Output<Tensor>("Mask"); auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace()); size_t size = framework::product(mask->dims()); auto* x_data = x->data<T>(); auto* y_data = y->mutable_data<T>(context.GetPlace()); if (dropout_prob == 1.0f) { PADDLE_ENFORCE_CUDA_SUCCESS( cudaMemsetAsync(y_data, 0, x_numel * sizeof(T), stream)); PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemsetAsync( mask_data, 0, x_numel * sizeof(*mask_data), stream)); return; } int threads = 512; int grid = (x_numel + threads - 1) / threads; if (seed && platform::is_gpu_place(seed->place())) { auto seed_gpu_data = seed->data<int>(); RandomGeneratorWithSeed<T, uint8_t><<<grid, threads, 0, stream>>>( size, seed_gpu_data, dropout_prob, x_data, mask_data, y_data, upscale_in_train); return; } int seed_data; std::random_device rnd; if (seed) { seed_data = *(seed->data<int>()); } else { seed_data = context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd(); } int device_id = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace()) .GetDeviceId(); auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id); if (gen_cuda->GetIsInitPy() && (!context.Attr<bool>("fix_seed"))) { auto seed_offset = gen_cuda->IncrementOffset(1); RandomGeneratorWithGenerator<T, uint8_t><<<grid, threads, 0, stream>>>( size, seed_offset.first, dropout_prob, x_data, mask_data, y_data, upscale_in_train, seed_offset.second); return; } RandomGenerator<T, uint8_t><<<grid, threads, 0, stream>>>( size, seed_data, dropout_prob, x_data, mask_data, y_data, upscale_in_train); } else { auto X = EigenMatrix<T>::Reshape(*x, 1); auto Y = EigenMatrix<T>::Reshape(*y, 1); if (upscale_in_train) { Y.device(place) = X; } else { Y.device(place) = X * static_cast<T>(1.0f - dropout_prob); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( dropout, ops::GPUDropoutKernel<plat::CUDADeviceContext, float>, ops::GPUDropoutKernel<plat::CUDADeviceContext, plat::float16>, ops::GPUDropoutKernel<plat::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( dropout_grad, ops::DropoutGradKernel<plat::CUDADeviceContext, float>, ops::DropoutGradKernel<plat::CUDADeviceContext, plat::float16>, ops::DropoutGradKernel<plat::CUDADeviceContext, double>);
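The "upscale_in_train" branch in the kernels above divides kept activations by (1 - dropout_prob) at training time so that the inference path can be the identity (the else-branch of GPUDropoutKernel then just copies, or rescales in the downscale convention). A quick host-side check of that convention is shown below; it uses arbitrary test values and the C++ standard RNG, nothing from the operator itself, and compiles as plain host code inside a .cu file.

#include <cstdio>
#include <random>

// Monte-Carlo check: with inverted dropout, E[mask / (1 - p)] = 1, so the
// expected value of the scaled training output equals the input and no
// rescaling is needed at inference time.
int main() {
  const float p = 0.3f;                    // dropout probability (test value)
  std::mt19937 rng(42);
  std::uniform_real_distribution<float> uni(0.0f, 1.0f);
  double acc = 0.0;
  const int trials = 1000000;
  for (int i = 0; i < trials; ++i) {
    float mask = (uni(rng) < p) ? 0.0f : 1.0f;
    acc += mask / (1.0f - p);              // inverted-dropout scaling
  }
  printf("mean scale = %f (expected ~1.0)\n", acc / trials);
  return 0;
}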
dfc4e29f04b92ee0f06434524029930ce2c0614a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include "myhelpers.h" #include "d2func.h" #include "d3func.h" #include "Globalvars.h" #include "Enfunc.h" #include "Gradfunc.h" #include "ConGradfunc.h" double unitrand() { /* * Arguments: none * Returns: a "random" number between 0 and 1 */ return (((double)rand())/((double)(RAND_MAX))); } void initcond(double3 *x, double2 *ang, double *len, double *diam) { /* * Arguments: the 3d position array, the 2d angular orientation array, the length array, and * the diameter array * Returns: generates an initial random packing for our system, the position array will be in * cylindrical coordinates and will need to be converted to cartesian later */ int i; srand(time(NULL)); for(i=0;i<npart;i++) { x[i].x=R*unitrand(); x[i].y=2*M_PI*unitrand(); x[i].z=H*unitrand(); len[i]=0.0; diam[i]=0.0; ang[i].x=unitrand()*2*M_PI; ang[i].y=unitrand()*2*M_PI; } } double packfrac() { /* * Arguments: none * Returns: calculates and returns the packing fraction (currently only works if all particles * are the same size, if you want to fix that uncomment the for loop and don't multiply * Vp by npart and make that "=" a "+=") */ double Vp=0.0; double Vb=M_PI*R*R*H; int i=0; //for(i=0;i<npart;i++) { Vp=npart*(M_PI*(sigma[i]/2.0)*(sigma[i]/2.0)*l[i]+(4.0/3.0)*M_PI*(sigma[i]/2.0)*(sigma[i]/2.0)*(sigma[i]/2.0)); //} return Vp/Vb; } void updatephi() { /* * Arguments: none * Returns: updates our l and sigma values based on the current value of our global variable * phi, which represents our packing fraction (used to more easily iterate through packing * fractions rather than iterate through l and sigma values) */ double Vcont; double Vpart; if(CUBE==1) { Vcont=LENGTH*WIDTH*HEIGHT; } else { Vcont=M_PI*R*R*H; } Vpart=phi*Vcont; int i; for(i=0;i<npart;i++) { sigma[i]=pow((Vpart/npart)/(M_PI/6.0+M_PI*ALPHA/4.0),1.0/3.0); l[i]=ALPHA*sigma[i]; } } __global__ void updatephikernel(double* l, double* sigma, double* params, double* phi) { int tx=threadIdx.x+blockIdx.x*blockDim.x; double Vcont, Vpart; int npart; double R,H,ALPHA; npart=(int)params[0]; R=params[1]; H=params[2]; ALPHA=params[3]; Vcont=M_PI*R*R*H; Vpart=phi[0]*Vcont; sigma[tx]=pow((Vpart/npart)/(M_PI/6.0+M_PI*ALPHA/4.0),1.0/3.0); l[tx]=ALPHA*sigma[tx]; } __global__ void variabledphi(double* params, double U) { if(U<1e-9) { params[4]=params[4]*1.1; } else { params[4]=params[4]*0.9; } } __global__ void DeviceAdd(int i, double* a, double b) { a[i]+=b; } double Pressure() { /* * Arguments: none * Returns: calculates and returns the total pressure that the particles exert on the walls */ int i; double3 F; //F.x is the force on the radial wall, F.y is on the top wall, F.z -> bottom double3 P; F=d3null(); P=d3null(); for(i=0;i<npart;i++) { sptoca(i); ends(i); if(r1[i].x>R-(sigma[i]/2.0)) { F.x+=r1[i].x-(R-sigma[i]/2.0); } if(r2[i].x>R-(sigma[i]/2.0)) { F.x+=r2[i].x-(R-sigma[i]/2.0); } if(r1[i].z-sigma[i]/2.0<0.0) { F.z+=fabs(r1[i].z-sigma[i]/2.0); } if(r1[i].z+sigma[i]/2.0>H) { F.y+=r1[i].z+sigma[i]/2.0-H; } if(r2[i].z-sigma[i]/2.0<0.0) { F.z+=fabs(r2[i].z-sigma[i]/2.0); } if(r2[i].z+sigma[i]/2.0>H) { F.y+=r2[i].z+sigma[i]/2.0-H; } } P.x=F.x/(2.0*M_PI*R*H); P.y=F.y/(M_PI*R*R); P.z=F.z/(M_PI*R*R); return sqrt(P.x*P.x)+sqrt(P.y*P.y)+sqrt(P.z*P.z); } double contacts() { /* * Arguments: none * Returns: the average number of other particles a particle is currently in contact with, this * value doesn't really mean much anymore */ int i; int j; 
double ret=0.0; for(i=0;i<npart;i++) { for(j=0;j<npart;j++) { sptoca(i); ends(i); if((i!=j) && (d3dist(r[i],r[j])<(l[i]+sigma[i]+l[j]+sigma[j])/2.0)) { double lambda_i, lambda_j; lambda_i=lambda(r[i],r[j],u[i],u[j],l[i]); lambda_j=lambda(r[j],r[i],u[j],u[i],l[j]); double d; d=d3SCdist(r[i],r[j],u[i],u[j],lambda_i,lambda_j); if(d<(sigma[i]+sigma[j])/2.0) { ret+=1.0; } } } sptoca(i); ends(i); if(r1[i].x>R-(sigma[i]/2.0)) { ret+=1.0; } if(r2[i].x>R-(sigma[i]/2.0)) { ret+=1.0; } if(r1[i].z-sigma[i]/2.0<0.0) { ret+=1.0; } if(r1[i].z+sigma[i]/2.0>H) { ret+=1.0; } if(r2[i].z-sigma[i]/2.0<0.0) { ret+=1.0; } if(r2[i].z+sigma[i]/2.0>H) { ret+=1.0; } } double U=collider(); if(sqrt((U/npart)*2.0)/(sigma[0]/2.0)<.05) { return 0; } else { return ret; } }
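packfrac() and updatephi()/updatephikernel() in the file above are inverses of each other for spherocylinders with l = ALPHA*sigma: the per-particle volume pi*(sigma/2)^2*l + (4/3)*pi*(sigma/2)^3 collapses to sigma^3*(pi*ALPHA/4 + pi/6), which is exactly the factor updatephi() divides by before taking the cube root. The host-side round-trip check below uses arbitrary test inputs, not the simulation's parameters.

#include <cstdio>
#include <cmath>

int main() {
  const double R = 5.0, H = 10.0, ALPHA = 1.5, phi = 0.4;   // test values only
  const int npart = 1000;
  const double Vcont = M_PI * R * R * H;                    // cylinder container
  // Solve for sigma the way updatephi() does.
  const double sigma =
      pow((phi * Vcont / npart) / (M_PI / 6.0 + M_PI * ALPHA / 4.0), 1.0 / 3.0);
  const double l = ALPHA * sigma;
  // Recompute the packing fraction from (sigma, l) the way packfrac() does.
  const double Vp = npart * (M_PI * (sigma / 2) * (sigma / 2) * l
                             + (4.0 / 3.0) * M_PI * pow(sigma / 2, 3));
  printf("requested phi = %.6f, recovered phi = %.6f\n", phi, Vp / Vcont);
  return 0;
}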
dfc4e29f04b92ee0f06434524029930ce2c0614a.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include "myhelpers.h" #include "d2func.h" #include "d3func.h" #include "Globalvars.h" #include "Enfunc.h" #include "Gradfunc.h" #include "ConGradfunc.h" double unitrand() { /* * Arguments: none * Returns: a "random" number between 0 and 1 */ return (((double)rand())/((double)(RAND_MAX))); } void initcond(double3 *x, double2 *ang, double *len, double *diam) { /* * Arguments: the 3d position array, the 2d angular orientation array, the length array, and * the diameter array * Returns: generates an initial random packing for our system, the position array will be in * cylindrical coordinates and will need to be converted to cartesian later */ int i; srand(time(NULL)); for(i=0;i<npart;i++) { x[i].x=R*unitrand(); x[i].y=2*M_PI*unitrand(); x[i].z=H*unitrand(); len[i]=0.0; diam[i]=0.0; ang[i].x=unitrand()*2*M_PI; ang[i].y=unitrand()*2*M_PI; } } double packfrac() { /* * Arguments: none * Returns: calculates and returns the packing fraction (currently only works if all particles * are the same size, if you want to fix that uncomment the for loop and don't multiply * Vp by npart and make that "=" a "+=") */ double Vp=0.0; double Vb=M_PI*R*R*H; int i=0; //for(i=0;i<npart;i++) { Vp=npart*(M_PI*(sigma[i]/2.0)*(sigma[i]/2.0)*l[i]+(4.0/3.0)*M_PI*(sigma[i]/2.0)*(sigma[i]/2.0)*(sigma[i]/2.0)); //} return Vp/Vb; } void updatephi() { /* * Arguments: none * Returns: updates our l and sigma values based on the current value of our global variable * phi, which represents our packing fraction (used to more easily iterate through packing * fractions rather than iterate through l and sigma values) */ double Vcont; double Vpart; if(CUBE==1) { Vcont=LENGTH*WIDTH*HEIGHT; } else { Vcont=M_PI*R*R*H; } Vpart=phi*Vcont; int i; for(i=0;i<npart;i++) { sigma[i]=pow((Vpart/npart)/(M_PI/6.0+M_PI*ALPHA/4.0),1.0/3.0); l[i]=ALPHA*sigma[i]; } } __global__ void updatephikernel(double* l, double* sigma, double* params, double* phi) { int tx=threadIdx.x+blockIdx.x*blockDim.x; double Vcont, Vpart; int npart; double R,H,ALPHA; npart=(int)params[0]; R=params[1]; H=params[2]; ALPHA=params[3]; Vcont=M_PI*R*R*H; Vpart=phi[0]*Vcont; sigma[tx]=pow((Vpart/npart)/(M_PI/6.0+M_PI*ALPHA/4.0),1.0/3.0); l[tx]=ALPHA*sigma[tx]; } __global__ void variabledphi(double* params, double U) { if(U<1e-9) { params[4]=params[4]*1.1; } else { params[4]=params[4]*0.9; } } __global__ void DeviceAdd(int i, double* a, double b) { a[i]+=b; } double Pressure() { /* * Arguments: none * Returns: calculates and returns the total pressure that the particles exert on the walls */ int i; double3 F; //F.x is the force on the radial wall, F.y is on the top wall, F.z -> bottom double3 P; F=d3null(); P=d3null(); for(i=0;i<npart;i++) { sptoca(i); ends(i); if(r1[i].x>R-(sigma[i]/2.0)) { F.x+=r1[i].x-(R-sigma[i]/2.0); } if(r2[i].x>R-(sigma[i]/2.0)) { F.x+=r2[i].x-(R-sigma[i]/2.0); } if(r1[i].z-sigma[i]/2.0<0.0) { F.z+=fabs(r1[i].z-sigma[i]/2.0); } if(r1[i].z+sigma[i]/2.0>H) { F.y+=r1[i].z+sigma[i]/2.0-H; } if(r2[i].z-sigma[i]/2.0<0.0) { F.z+=fabs(r2[i].z-sigma[i]/2.0); } if(r2[i].z+sigma[i]/2.0>H) { F.y+=r2[i].z+sigma[i]/2.0-H; } } P.x=F.x/(2.0*M_PI*R*H); P.y=F.y/(M_PI*R*R); P.z=F.z/(M_PI*R*R); return sqrt(P.x*P.x)+sqrt(P.y*P.y)+sqrt(P.z*P.z); } double contacts() { /* * Arguments: none * Returns: the average number of other particles a particle is currently in contact with, this * value doesn't really mean much anymore */ int i; int j; double ret=0.0; for(i=0;i<npart;i++) { for(j=0;j<npart;j++) { sptoca(i); ends(i); 
if((i!=j) && (d3dist(r[i],r[j])<(l[i]+sigma[i]+l[j]+sigma[j])/2.0)) { double lambda_i, lambda_j; lambda_i=lambda(r[i],r[j],u[i],u[j],l[i]); lambda_j=lambda(r[j],r[i],u[j],u[i],l[j]); double d; d=d3SCdist(r[i],r[j],u[i],u[j],lambda_i,lambda_j); if(d<(sigma[i]+sigma[j])/2.0) { ret+=1.0; } } } sptoca(i); ends(i); if(r1[i].x>R-(sigma[i]/2.0)) { ret+=1.0; } if(r2[i].x>R-(sigma[i]/2.0)) { ret+=1.0; } if(r1[i].z-sigma[i]/2.0<0.0) { ret+=1.0; } if(r1[i].z+sigma[i]/2.0>H) { ret+=1.0; } if(r2[i].z-sigma[i]/2.0<0.0) { ret+=1.0; } if(r2[i].z+sigma[i]/2.0>H) { ret+=1.0; } } double U=collider(); if(sqrt((U/npart)*2.0)/(sigma[0]/2.0)<.05) { return 0; } else { return ret; } }
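variabledphi() above implements a simple adaptive compression step: when the relaxed overlap energy is essentially zero the packing-fraction increment grows by 10%, otherwise it shrinks by 10%. The driver loop that calls it is not part of this file, so the sketch below is a hypothetical host-side illustration with a toy energy function standing in for the real minimizer (collider()/conjugate gradient).

#include <cstdio>

// Toy stand-in for the relaxed overlap energy: pretend overlaps appear above
// phi = 0.55. This is purely illustrative, not the simulation's energy model.
static double energy_after_relax(double phi) {
  return (phi > 0.55) ? (phi - 0.55) * (phi - 0.55) : 0.0;
}

int main() {
  double phi = 0.30, dphi = 0.01;
  for (int step = 0; step < 50; ++step) {
    const double U = energy_after_relax(phi);
    dphi *= (U < 1e-9) ? 1.1 : 0.9;   // same update rule as variabledphi()
    phi += dphi;
    printf("step %2d  phi=%.4f  dphi=%.5f  U=%.3e\n", step, phi, dphi, U);
  }
  return 0;
}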
66dca0a85858b499cb817e6dcb9df25d240efa04.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include "for_use_GPU.h" #include "calc_feature_conf.h" #include "switch_release.h" #include "switch_float.h" /* declaration of texture memory */ //texture<FLOAT> A; //texture<FLOAT> B; texture<float, hipTextureType1D, hipReadModeElementType> A; texture<float, hipTextureType1D, hipReadModeElementType> B; texture<int2, hipTextureType1D, hipReadModeElementType> A_double; texture<int2, hipTextureType1D, hipReadModeElementType> B_double; //thread process /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // convolve A and B(non_symmetric) //unsigned __stdcall process(void *thread_arg) { /********************************************/ /* function for calculating root */ /********************************************/ extern "C" __global__ void process_root ( //FLOAT *A, //FLOAT *B, FLOAT *C, int *A_dims_array, int *B_dims_array, int len, int interval, int L_MAX, int *error_array, int error_array_num, int pid, int device_number ) { int idx_x = blockIdx.x * blockDim.x + threadIdx.x; int idx_y = blockIdx.y * blockDim.y + threadIdx.y; int ii = blockIdx.z % len; int level = blockIdx.z / len; int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] }; int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] }; int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 }; int C_x = C_dims[1]/device_number; if(C_dims[1]%device_number != 0){ C_x++; } idx_x = idx_x + pid * C_x; if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){ return ; } if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && interval <= level && level < L_MAX ) { int num_features = A_dims[2]; const int A_SQ = A_dims[0]*A_dims[1]; const int B_SQ = B_dims[0]*B_dims[1]; FLOAT add_val = 0; int x = idx_x; int y = idx_y; int XA0 = A_dims[0]*x; /* apply loop condition */ for(int i=0; i<error_array_num; i++){ if(error_array[i] == level){ return; } } /* adjust the location of pointer of C */ FLOAT *dst; unsigned long long int pointer = (unsigned long long int)C; for(int a=interval; a<level; a++) { for(int b=0; b<len; b++) { int height = A_dims_array[a*3] - B_dims_array[b*3] + 1; int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if (height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } } for(int b=0; b<ii; b++){ int height = A_dims_array[level*3] - B_dims_array[b*3] + 1; int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if (height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } dst = (FLOAT *)pointer; /* adjust the location of pointer of A */ //unsigned long long int pointerA = (unsigned long long int)A; int A_index_ini = 0; for(int a=0; a<level; a++) { // pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]*sizeof(FLOAT)); A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]; } /* adjust the location of pointer of B */ //unsigned long long int pointerB = (unsigned long long int)B; int B_index_ini = 0; for(int b=0; b<ii; b++) { // pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT)); 
B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]; } for(int f = 0; f < num_features; f++) // num_features = 31 { // FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ; int A_index = A_index_ini + f*A_SQ; // FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ; int B_index = B_index_ini + f*B_SQ; // FLOAT *A_src2 =A_src+XA0; A_index += XA0; FLOAT val = 0; // FLOAT *A_off = A_src2+y; A_index += y; // FLOAT *B_off = B_src; for (int xp = 0; xp < B_dims[1]; xp++) { // FLOAT *A_temp = A_off; int A_index_tmp = A_index; // FLOAT *B_temp = B_off; int B_index_tmp = B_index; for (int yp = 0; yp < B_dims[0]; yp++) { // val += *(A_temp++) * *(B_temp++); if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision { FLOAT A_val = tex1Dfetch(A, A_index_tmp); FLOAT B_val = tex1Dfetch(B, B_index_tmp); val += A_val * B_val; } else { // if configured to use double precision int2 A_val = tex1Dfetch(A_double, A_index_tmp); int2 B_val = tex1Dfetch(B_double, B_index_tmp); val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x); } A_index_tmp++; B_index_tmp++; } // A_off+=A_dims[0]; A_index += A_dims[0]; // B_off+=B_dims[0]; B_index += B_dims[0]; } add_val += val; } *(dst + (idx_x*C_dims[0] + idx_y)) += add_val; } return; } /********************************************/ /* function for calculating part */ /********************************************/ extern "C" __global__ void process_part ( //FLOAT *A, //FLOAT *B, FLOAT *C, int *A_dims_array, int *B_dims_array, int len, int interval, int L_MAX, int *error_array, int error_array_num, int pid, int device_number ) { int idx_x = blockIdx.x * blockDim.x + threadIdx.x; int idx_y = blockIdx.y * blockDim.y + threadIdx.y; int ii = blockIdx.z % len; int level = blockIdx.z / len; int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] }; int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] }; int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 }; int C_x = C_dims[1]/device_number; if(C_dims[1]%device_number != 0){ C_x++; } idx_x = idx_x + pid * C_x; if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){ return ; } if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && 0 <= level && level < (L_MAX - interval) ) { int num_features = A_dims[2]; const int A_SQ = A_dims[0]*A_dims[1]; const int B_SQ = B_dims[0]*B_dims[1]; FLOAT add_val = 0; int x = idx_x; int y = idx_y; int XA0 = A_dims[0]*x; /* apply loop condition */ for(int i=0; i<error_array_num; i++){ if(error_array[i] == level) return; } /* adjust the location of pointer of C */ FLOAT *dst; unsigned long long int pointer = (unsigned long long int)C; for(int a=0; a<level; a++) { for(int b=0; b<len; b++){ int height = A_dims_array[a*3] - B_dims_array[b*3] + 1; int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if(height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } } for(int b=0; b<ii; b++){ int height = A_dims_array[level*3] - B_dims_array[b*3] + 1; int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if(height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } dst = (FLOAT *)pointer; /* adjust the location of pointer of A */ // unsigned long long int pointerA = (unsigned long long int)A; int A_index_ini = 0; for(int a=0; a<level; 
a++) { // pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]*sizeof(FLOAT)); A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]; } /* adjust the location of pointer of B */ // unsigned long long int pointerB = (unsigned long long int)B; int B_index_ini = 0; for(int b=0; b<ii; b++) { // pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT)); B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]; } for(int f = 0; f < num_features; f++) // num_features = 31 { // FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ; int A_index = A_index_ini + f*A_SQ; // FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ; int B_index = B_index_ini + f*B_SQ; // FLOAT *A_src2 =A_src+XA0; A_index += XA0; FLOAT val = 0; // FLOAT *A_off = A_src2+y; A_index += y; // FLOAT *B_off = B_src; for (int xp = 0; xp < B_dims[1]; xp++) { // FLOAT *A_temp = A_off; int A_index_tmp = A_index; // FLOAT *B_temp = B_off; int B_index_tmp = B_index; for (int yp = 0; yp < B_dims[0]; yp++) { // val += *(A_temp++) * *(B_temp++); if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision { FLOAT A_val = tex1Dfetch(A, A_index_tmp); FLOAT B_val = tex1Dfetch(B, B_index_tmp); val += A_val * B_val; } else // if configured to use double precision { int2 A_val = tex1Dfetch(A_double, A_index_tmp); int2 B_val = tex1Dfetch(B_double, B_index_tmp); val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x); } A_index_tmp++; B_index_tmp++; } // A_off+=A_dims[0]; A_index += A_dims[0]; // B_off+=B_dims[0]; B_index += B_dims[0]; } add_val += val; } *(dst + (idx_x*C_dims[0] + idx_y)) += add_val; } return; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" __global__ void inverse_Q( FLOAT *src_start, int *size_array, int *error_array, int error_array_num, int NoP, int *PIDX_array, int *numpart, int NoC, int max_numpart, int interval, int L_MAX, int pid, int device_number ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int kk = blockIdx.y * blockDim.y + threadIdx.y; int jj = threadIdx.z; int L = blockIdx.z; int numpart_jj; int C_y; if(0<=jj && jj<NoC) { numpart_jj = numpart[jj]; C_y = numpart_jj/device_number; if(numpart_jj%device_number != 0){ C_y++; } kk = kk + pid * C_y; if(kk < C_y * pid || kk >= C_y * (pid + 1)){ return ; } } else return ; if(0<=L && L < (L_MAX-interval)) { /* loop condition */ for(int h=0; h<error_array_num; h++) { if(L==error_array[h]){ return; } } if( 0<=kk && kk < numpart_jj ) { int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; int dim0 = size_array[L*NoP*2 + PIDX*2]; int dim1 = size_array[L*NoP*2 + PIDX*2+1]; if( idx < 0 || dim0*dim1 <= idx) return; /* pointer adjustment */ FLOAT *src; unsigned long long int ptr_adjuster = (unsigned long long int)src_start; for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoP; j++) { int height = size_array[i*NoP*2 + j*2]; int width = size_array[i*NoP*2 + j*2+1]; ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT)); } } for(int j=0; 
j<PIDX; j++) { int height = size_array[L*NoP*2 + j*2]; int width = size_array[L*NoP*2 + j*2+1]; ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT)); } src = (FLOAT *)ptr_adjuster; *(src + idx) *= -1; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // dt helper function __device__ void dt_helper(FLOAT *src, FLOAT *dst, int *ptr, int step, int s1, int s2, int d1, int d2, FLOAT a, FLOAT b) { if (d2 >= d1) { int d = (d1+d2) >> 1; int ds =d*step; int s = s1; FLOAT src_ss = *(src+s*step); for (int p = s1+1; p <= s2; p++) { int t1 = d-s; int t2 = d-p; if (src_ss + a*t1*t1 + b*t1 > *(src+p*step) + a*t2*t2 + b*t2) { s = p; src_ss = *(src+s*step); } } int D = d-s; dst[ds] = *(src+s*step) + a*D*D + b*D; ptr[ds] = s; dt_helper(src, dst, ptr, step, s1, s, d1, d-1, a, b); dt_helper(src, dst, ptr, step, s, s2, d+1, d2, a, b); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //sub function of dt extern "C" __global__ void dt1d_x( FLOAT *src_start, // part_C_dev FLOAT *dst_start, // tmpM_dev int *ptr_start, // tmpIy_dev int *DID_4_array, // DID_4_array_dev FLOAT *def_array, // def_array_dev int *size_array, // pm_size_array_dev int NoP, // NoP int *PIDX_array, // PIDX_array_dev int *error_array, // part_error_array_dev int error_array_num, // part_error_array_num int *numpart, // numpart_jj int NoC, // NoC int max_numpart, // max_numpart int interval, // interval int L_MAX, // L_MAX int pid, // pid int device_number // device_number ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int kk = blockIdx.y * blockDim.y + threadIdx.y; int jj = threadIdx.z; int L = blockIdx.z; int numpart_jj; int C_y; if(0<=jj && jj<NoC) { numpart_jj = numpart[jj]; C_y = numpart_jj/device_number; if(numpart_jj%device_number != 0){ C_y++; } kk = kk + pid * C_y; if(kk < C_y * pid || kk >= C_y * (pid + 1)){ return ; } } else{ return ; } if(0<=L && L<(L_MAX-interval)) { /* loop condition */ for(int h=0; h<error_array_num; h++) { if(L==error_array[h]){ return; } } if(0<=kk && kk<numpart_jj) { int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; int dim1 = size_array[L*NoP*2 + PIDX*2+1]; if( idx < 0 || dim1 <= idx ) return; int dim0 = size_array[L*NoP*2 + PIDX*2]; int XD=0; int step = 1; int n = dim0; int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; FLOAT a = def_array[DID_4+2]; FLOAT b = def_array[DID_4+3]; /* pointer adjustment */ unsigned long long int adj_src = (unsigned long long int)src_start; unsigned long long int adj_dst = (unsigned long long int)dst_start; unsigned long long int adj_ptr = (unsigned long long int)ptr_start; /* for src */ for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoP; j++) { int height = size_array[i*NoP*2 + j*2]; int width = size_array[i*NoP*2 + j*2+1]; adj_src += (unsigned long long 
int)(height*width*sizeof(FLOAT)); } } for(int j=0; j<PIDX; j++) { int height = size_array[L*NoP*2 + j*2]; int width = size_array[L*NoP*2 + j*2+1]; adj_src += (unsigned long long int)(height*width*sizeof(FLOAT)); } /* for dst, ptr */ // adjust "dst" to tmpM[L][jj][kk] // adjust "ptr" to tmpIy[L][jj][kk] for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoC; j++) { for(int k=0; k<numpart[j]; k++) { int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k]; int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2]; int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1]; adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } } for(int i=0; i<jj; i++) { for(int j=0; j<numpart[i]; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j] int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2] int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1] adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } for(int j=0; j<kk; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j]; // PIDX_array[L][jj][j] int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2] int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1] adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } FLOAT *src = (FLOAT *)adj_src; FLOAT *dst = (FLOAT *)adj_dst; int *ptr = (int *)adj_ptr; /* main calculation of di1d_x */ XD = idx*dim0; dt_helper(src+XD, dst+XD, ptr+XD, step, 0, n-1, 0, n-1, a, b); } } } extern "C" __global__ void dt1d_y( FLOAT *src_start, // tmpM_dev FLOAT *dst_start, // M_dev int *ptr_start, // tmpIx_dev int *DID_4_array, // DID_4_array_dev FLOAT *def_array, // def_array_dev int NoP, // NoP int *size_array, // pm_size_array_dev int *numpart, // numpart_jj int *PIDX_array, // PIDX_array_dev int NoC, // NoC int max_numpart, // max_numpart int interval, // interval int L_MAX, // L_MAX int *error_array, // part_error_array_dev int error_array_num, // part_error_array_num int pid, // pid int device_number // device_number ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int kk = blockIdx.y * blockDim.y + threadIdx.y; int jj = threadIdx.z; int L = blockIdx.z; int numpart_jj; int C_y; if(0<=jj && jj<NoC) { numpart_jj = numpart[jj]; C_y = numpart_jj/device_number; if(numpart_jj%device_number != 0){ C_y++; } kk = kk + pid * C_y; if(kk < C_y * pid || kk >= C_y * (pid + 1)){ return ; } } else{ return ; } if(0<=L && L<(L_MAX-interval)) { /* loop condition */ for(int h=0; h<error_array_num; h++) { if(L==error_array[h]){ return; } } if( 0<=kk && kk<numpart_jj) { int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; int dim0 = size_array[L*NoP*2 + PIDX*2]; if( idx < 0 || dim0 <= idx ) return; int dim1 = size_array[L*NoP*2 + PIDX*2+1]; int step = dim0; int n = dim1; int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; FLOAT a = def_array[DID_4]; // ax FLOAT b = def_array[DID_4+1]; // bx /* pointer adjustment */ unsigned long long int adj_src = (unsigned long long int)src_start; unsigned long long int adj_dst = (unsigned long long 
int)dst_start; unsigned long long int adj_ptr = (unsigned long long int)ptr_start; /* for src, dst, ptr */ /* adjust "src" to tmpM[L][jj][kk] */ /* adjust "dst" to M[L][jj][kk] */ /* adjust "ptr" to tmpIx[L][jj][kk] */ for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoC; j++) { for(int k=0; k<numpart[j]; k++) { int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k]; int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2]; int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1]; adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } } for(int i=0; i<jj; i++) { for(int j=0; j<numpart[i]; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j] int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2] int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1] adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } for(int j=0; j<kk; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j]; int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } FLOAT *src = (FLOAT *)adj_src; FLOAT *dst = (FLOAT *)adj_dst; int *ptr = (int *)adj_ptr; dt_helper(src+idx, dst+idx, ptr+idx, step, 0, n-1, 0, n-1, a, b); } } } /*************************************************************/ /*************************************************************/ /* original source of dt function loop */ // for (int x = 0; x < dims[1]; x++) // { // dt1d(vals+XD, tmpM+XD, tmpIy+XD, 1, dims[0], ay, by); // XD+=dims[0]; // } // for (int y = 0; y < dims[0]; y++) // { // dt1d(tmpM+y, M+y, tmpIx+y, dims[0], dims[1], ax, bx); // } /*************************************************************/ /*************************************************************/ extern "C" __global__ void calc_a_score( int IWID, int IHEI, FLOAT scale, int padx_n, int pady_n, int *RX_array, int *RY_array, FLOAT *ac_score, FLOAT *score_array, int *ssize_array, int NoC, int *size_score_array ) { int ii = blockIdx.x * blockDim.x + threadIdx.x; int jj = blockIdx.y * blockDim.y + threadIdx.y; int component_jj = threadIdx.z; if(0<=component_jj && component_jj < NoC) { unsigned long long int pointer_score = (unsigned long long int)score_array; unsigned long long int pointer_ssize = (unsigned long long int)ssize_array; unsigned long long int pointer_RX = (unsigned long long int)RX_array; unsigned long long int pointer_RY = (unsigned long long int)RY_array; for(int k=0; k<component_jj; k++) { pointer_score += (unsigned long long int)size_score_array[k]; pointer_ssize += (unsigned long long int)(sizeof(int)); pointer_RX += (unsigned long long int)(sizeof(int)); pointer_RY += (unsigned long long int)(sizeof(int)); } FLOAT *score = (FLOAT *)pointer_score; int ssize0 = *((int *)pointer_ssize); int ssize1 = *((int *)pointer_ssize + 
sizeof(int)); int RX = *((int *)pointer_RX); int RY = *((int *)pointer_RY); if(0<=ii && ii<IWID && 0<=jj && jj<IHEI) { int Xn = (int)((FLOAT)ii/scale+padx_n); int Yn = (int)((FLOAT)jj/scale+pady_n); if(Yn<ssize0 && Xn<ssize1) { FLOAT sc = score[Yn+Xn*ssize0]; int Im_Y = jj+RY; int Im_X = ii+RX; if(Im_Y<IHEI && Im_X<IWID) { FLOAT *PP = ac_score+Im_Y+Im_X*IHEI; if(sc>*PP) *PP=sc; } } } } /*************************************************************/ /*************************************************************/ /* original source of calc_a_score loop */ // for(int ii=0;ii<IWID;ii++) // { // int Xn=(int)((FLOAT)ii/scale+padx_n); // for(int jj=0;jj<IHEI;jj++) // { // int Yn =(int)((FLOAT)jj/scale+pady_n); // if(Yn<ssize[0] && Xn<ssize[1]) // { // FLOAT sc = score[Yn+Xn*ssize[0]]; //get score of pixel // int Im_Y = jj+RY; // int Im_X = ii+RX; // if(Im_Y<IHEI && Im_X<IWID) // { // FLOAT *PP=ac_score+Im_Y+Im_X*IHEI; //consider root rectangle size // if(sc>*PP) *PP=sc; //save max score // } // } // } // } /*************************************************************/ /*************************************************************/ } __device__ static inline int min_i(int x, int y) {return (x <= y ? x : y);} #ifdef USE_FLOAT_AS_DECIMAL /************************************************/ /* atomic function dealing with float precision */ __device__ static inline float atomicAdd_float(float *address, float val) { return atomicAdd(address, val); // atomicAdd must be called from "__device__" function } /*************************************************/ #else /* ifdef USE_FLOAT_AS_DECIMAL */ /*************************************************/ /* atomic function dealing with double precision */ __device__ static inline double atomicAdd_double (double *address, double val) { unsigned long long int *address_as_ull = (unsigned long long int *)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); }while(assumed != old); return __longlong_as_double(old); } /*************************************************/ #endif /* ifdef USE_FLOAT_AS_DECIMAL */ /***********************************************************/ /* function which cast from int2 to unsigned long long int */ __device__ static inline unsigned long long int hiloint2uint64(int hi, int lo) { int combined[] = {hi, lo}; return *reinterpret_cast<unsigned long long int*>(combined); } /***********************************************************/ /* declaration of texture memory */ #ifdef USE_FLOAT_AS_DECIMAL texture<float, hipTextureType1D, hipReadModeElementType> resized_image; #else texture<uint2, hipTextureType1D, hipReadModeElementType> resized_image_double; #endif texture<int , hipTextureType1D, hipReadModeElementType> resized_image_size; texture<int, hipTextureType1D, hipReadModeElementType> image_idx_incrementer; texture<uint2, hipTextureType1D, hipReadModeElementType> hist_ptr_incrementer; texture<uint2, hipTextureType1D, hipReadModeElementType> norm_ptr_incrementer; texture<uint2, hipTextureType1D, hipReadModeElementType> feat_ptr_incrementer; extern "C" __global__ void calc_hist ( FLOAT *hist_top, int sbin, int visible_0, int visible_1, int level ) { /* index of each pixels */ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; const FLOAT Hcos[9] = {1.0000, 0.9397, 0.7660, 0.5000, 0.1736, -0.1736, -0.5000, -0.7660, -0.9397}; const FLOAT Hsin[9] = {0.0000, 0.3420, 0.6428, 
0.8660, 0.9848, 0.9848, 0.8660, 0.6428, 0.3420}; /* adjust pointer position */ int base_index = tex1Dfetch(image_idx_incrementer, level); uint2 ptr_hist_uint2 = tex1Dfetch(hist_ptr_incrementer, level); unsigned long long int ptr_hist = (unsigned long long int)hist_top + hiloint2uint64(ptr_hist_uint2.x, ptr_hist_uint2.y); // convert uint2 -> unsigned long long int FLOAT *hist = (FLOAT *)ptr_hist; /* input size */ const int height = tex1Dfetch(resized_image_size, level*3); const int width = tex1Dfetch(resized_image_size, level*3 + 1); const int dims[2] = {height, width}; /* size of Histgrams and Norm calculation space */ const int blocks[2] = { (int)floor((double)height/(double)sbin+0.5), (int)floor((double)width/(double)sbin+0.5) }; // for (int x=1; x<visible[1]-1; x++) { // for (int y=1; y<visible[0]-1; y++) { if (1<=x && x<visible_1-1 && 1<=y && y<visible_0-1) { /* first color channel */ // base_index += min_i(x, dims[1]-2)*dims[0] + min_i(y, dims[0]-2); base_index += min_i(x, dims[1]-2) + min_i(y, dims[0]-2)*dims[1]; FLOAT dx, dy; #ifdef USE_FLOAT_AS_DECIMAL { // dy = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; // dx = tex1Dfetch(resized_image, base_index + dims[0]) - tex1Dfetch(resized_image, base_index - dims[0]) ; dx = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; dy = tex1Dfetch(resized_image, base_index + dims[1]) - tex1Dfetch(resized_image, base_index - dims[1]) ; } #else { int2 arg1 = tex1Dfetch(resized_image_double, base_index + 1); int2 arg2 = tex1Dfetch(resized_image_double, base_index - 1) ; // dy = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); dx = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); // arg1 = tex1Dfetch(resized_image_double, base_index + dims[0]); // arg2 = tex1Dfetch(resized_image_double, base_index - dims[0]); // dx = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); arg1 = tex1Dfetch(resized_image_double, base_index + dims[1]); arg2 = tex1Dfetch(resized_image_double, base_index - dims[1]); dy = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); } #endif FLOAT v = dx*dx + dy*dy; /* second color channel */ base_index += dims[0]*dims[1]; FLOAT dx2, dy2; #ifdef USE_FLOAT_AS_DECIMAL { // dy2 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; // dx2 = tex1Dfetch(resized_image, base_index + dims[0]) - tex1Dfetch(resized_image, base_index - dims[0]) ; dx2 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; dy2 = tex1Dfetch(resized_image, base_index + dims[1]) - tex1Dfetch(resized_image, base_index - dims[1]) ; } #else { int2 arg1 = tex1Dfetch(resized_image_double, base_index + 1); int2 arg2 = tex1Dfetch(resized_image_double, base_index - 1) ; // dy2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); dx2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); // arg1 = tex1Dfetch(resized_image_double, base_index + dims[0]); // arg2 = tex1Dfetch(resized_image_double, base_index - dims[0]); // dx2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); arg1 = tex1Dfetch(resized_image_double, base_index + dims[1]); arg2 = tex1Dfetch(resized_image_double, base_index - dims[1]); dy2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); } #endif FLOAT v2 = dx2*dx2 + dy2*dy2; /* third color channel */ base_index += dims[0]*dims[1]; FLOAT dx3, dy3; #ifdef 
USE_FLOAT_AS_DECIMAL { // dy3 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; // dx3 = tex1Dfetch(resized_image, base_index + dims[0]) - tex1Dfetch(resized_image, base_index - dims[0]) ; dx3 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; dy3 = tex1Dfetch(resized_image, base_index + dims[1]) - tex1Dfetch(resized_image, base_index - dims[1]) ; } #else { int2 arg1 = tex1Dfetch(resized_image_double, base_index + 1); int2 arg2 = tex1Dfetch(resized_image_double, base_index - 1) ; // dy3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); dx3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); // arg1 = tex1Dfetch(resized_image_double, base_index + dims[0]); // arg2 = tex1Dfetch(resized_image_double, base_index - dims[0]); // dx3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); arg1 = tex1Dfetch(resized_image_double, base_index + dims[1]); arg2 = tex1Dfetch(resized_image_double, base_index - dims[1]); dy3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); } #endif FLOAT v3 = dx3*dx3 + dy3*dy3; /* pick channel with strongest gradient */ // if (v2 > v) { // v = v2; // dx = dx2; // dy = dy2; // } dx = (v2 > v) ? dx2 : dx; dy = (v2 > v) ? dy2 : dy; v = (v2 > v) ? v2 : v; // if (v3 > v) { // v = v3; // dx = dx3; // dy = dy3; // } dx = (v3 > v) ? dx3 : dx; dy = (v3 > v) ? dy3 : dy; v = (v3 > v) ? v3 : v; /* snap to one of 18 orientations */ FLOAT best_dot = 0; int best_o = 0; #pragma unroll 9 for (int o=0; o<9; o++) { FLOAT dot = Hcos[o]*dx + Hsin[o]*dy; if (dot > best_dot) { best_dot = dot; best_o = o; } else if (-dot > best_dot) { best_dot = -dot; best_o = o + 9; } } /*add to 4 histgrams aroud pixel using linear interpolation*/ FLOAT xp = ((FLOAT)x+0.5)/(FLOAT)sbin - 0.5; FLOAT yp = ((FLOAT)y+0.5)/(FLOAT)sbin - 0.5; int ixp = (int)floor((double)xp); int iyp = (int)floor((double)yp); FLOAT vx0 = xp - ixp; FLOAT vy0 = yp - iyp; FLOAT vx1 = 1.0 - vx0; FLOAT vy1 = 1.0 - vy0; v = sqrt((double)v); #ifdef USE_FLOAT_AS_DECIMAL { /* dummy variable to reduce warp divergence */ // float retval = 0; if (ixp >= 0 && iyp >= 0) { atomicAdd_float((float *)(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx1*vy1*v); } // retval = (ixp >= 0 && iyp >= 0) ? // atomicAdd_float((float *)(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx1*vy1*v) : // 0; if (ixp+1 < blocks[1] && iyp >= 0) { atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx0*vy1*v); } // retval = (ixp+1 < blocks[1] && iyp >= 0) ? // atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx0*vy1*v) : // 0; if (ixp >= 0 && iyp+1 < blocks[0]) { atomicAdd_float((float *)(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx1*vy0*v); } // retval = (ixp >= 0 && iyp+1 < blocks[0]) ? // atomicAdd_float((float *)(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx1*vy0*v) : // 0; if (ixp+1 < blocks[1] && iyp+1 < blocks[0]) { atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx0*vy0*v); } // retval = (ixp+1 < blocks[1] && iyp+1 < blocks[0]) ? 
// atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx0*vy0*v) : // 0; } #else /* ifdef USE_FLOAT_AS_DECIMAL */ { if (ixp >= 0 && iyp >= 0) { atomicAdd_double((double *)(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (double)vx1*vy1*v); } if (ixp+1 < blocks[1] && iyp >= 0) { atomicAdd_double((double *)(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (double)vx0*vy1*v); } if (ixp >= 0 && iyp+1 < blocks[0]) { atomicAdd_double((double *)(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (double)vx1*vy0*v); } if (ixp+1 < blocks[1] && iyp+1 < blocks[0]) { atomicAdd_double((double *)(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (double)vx0*vy0*v); } } #endif /* ifdef USE_FLOAT_AS_DECIMAL */ } // } // } /*************************************************************/ /* original source of calc hist loop */ // for (int x=1; x<visible[1]-1; x++) { // for (int y=1; y<visible[0]-1; y++) { // /* first color channel */ // FLOAT *s = SRC + min_i(x, dims[1]-2)*dims[0] + min_i(y, dims[0]-2); // FLOAT dy = *(s+1) - *(s-1); // FLOAT dx = *(s+dims[0]) - *(s-dims[0]); // FLOAT v = dx*dx + dy*dy; // /* second color channel */ // s += dims[0]*dims[1]; // FLOAT dy2 = *(s+1) - *(s-1); // FLOAT dx2 = *(s+dims[0]) - *(s-dims[0]); // FLOAT v2 = dx2*dx2 + dy2*dy2; // /* third color channel */ // s += dims[0]*dims[1]; // FLOAT dy3 = *(s+1) - *(s-1); // FLOAT dx3 = *(s+dims[0]) - *(s-dims[0]); // FLOAT v3 = dx3*dx3 + dy3*dy3; // /* pick channel with strongest gradient */ // if (v2 > v) { // v = v2; // dx = dx2; // dy = dy2; // } // if (v3 > v) { // v = v3; // dx = dx3; // dy = dy3; // } // /* snap to one of 18 orientations */ // FLOAT best_dot = 0; // int best_o = 0; // for (int o=0; o<9; o++) { // FLOAT dot = Hcos[o]*dx + Hsin[o]*dy; // if (dot > best_dot) { // best_dot = dot; // best_o = o; // } // else if (-dot > best_dot) { // best_dot = -dot; // best_o = o + 9; // } // } // /*add to 4 histgrams aroud pixel using linear interpolation*/ // FLOAT xp = ((FLOAT)x+0.5)/(FLOAT)sbin - 0.5; // FLOAT yp = ((FLOAT)y+0.5)/(FLOAT)sbin - 0.5; // int ixp = (int)floor(xp); // int iyp = (int)floor(yp); // FLOAT vx0 = xp - ixp; // FLOAT vy0 = yp - iyp; // FLOAT vx1 = 1.0 - vx0; // FLOAT vy1 = 1.0 - vy0; // v = sqrt(v); // if (ixp >= 0 && iyp >= 0) { // *(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]) += vx1*vy1*v; // } // if (ixp+1 < blocks[1] && iyp >= 0) { // *(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]) += vx0*vy1*v; // } // if (ixp >= 0 && iyp+1 < blocks[0]) { // *(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]) += vx1*vy0*v; // } // if (ixp+1 < blocks[1] && iyp+1 < blocks[0]) { // *(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]) += vx0*vy0*v; // } // } // } /*************************************************************/ /*************************************************************/ } extern "C" __global__ void calc_norm ( FLOAT *hist_top, FLOAT *norm_top, int blocks_0, int blocks_1, int level ) { /* index of each element of norm */ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x<blocks_1 && y<blocks_0) { /* adjust pointer position */ uint2 ptr_uint2 = tex1Dfetch(hist_ptr_incrementer, level); unsigned long long int ptr_hist = (unsigned long long int)hist_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int ptr_uint2 = tex1Dfetch(norm_ptr_incrementer, level); unsigned long long 
int ptr_norm = (unsigned long long int)norm_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *dst = (FLOAT *)(ptr_norm + (x*blocks_0 + y)*sizeof(FLOAT)); FLOAT add_val = 0; #pragma unroll 9 for (int orient=0; orient<9; orient++) { FLOAT *src1 = (FLOAT *)(ptr_hist + (orient*blocks_0*blocks_1 + x*blocks_0 + y)*sizeof(FLOAT)); FLOAT *src2 = (FLOAT *)(ptr_hist + ((orient+9)*blocks_0*blocks_1 + x*blocks_0 + y)*sizeof(FLOAT)); add_val += (*src1 + *src2) * (*src1 + *src2); } *(dst) += add_val; } /*************************************************************/ /* original source of compute_energy loop */ // /* compute energy in each block by summing over orientations */ // for (int o=0; o<9; o++) { // FLOAT *src1 = hist + o*blocks[0]*blocks[1]; // FLOAT *src2 = hist + (o+9)*blocks[0]*blocks[1]; // FLOAT *dst = norm; // FLOAT *end = norm + blocks[0]*blocks[1]; // while(dst < end) { // *(dst++) += (*src1 + *src2) * (*src1 + *src2); // src1++; // src2++; // } // } /*************************************************************/ /*************************************************************/ } /* definition of constant */ #define EPS 0.0001 //return minimum number (FLOAT) __device__ static inline FLOAT min_2(FLOAT x) {return (x <= 0.2 ? x :0.2);} extern "C" __global__ void calc_feat ( FLOAT *hist_top, FLOAT *norm_top, FLOAT *feat_top, int out_0, int out_1, int blocks_0, int blocks_1, int level ) { /* index of each element of feat */ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; /* adjust pointer position */ uint2 ptr_uint2 = tex1Dfetch(hist_ptr_incrementer, level); unsigned long long int ptr_hist = (unsigned long long int)hist_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *hist = (FLOAT *)ptr_hist; ptr_uint2 = tex1Dfetch(norm_ptr_incrementer, level); unsigned long long int ptr_norm = (unsigned long long int)norm_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *norm = (FLOAT *)ptr_norm; ptr_uint2 = tex1Dfetch(feat_ptr_incrementer, level); unsigned long long int ptr_feat = (unsigned long long int)feat_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *feat = (FLOAT *)ptr_feat; if (x<out_1 && y<out_0) { // for (int x=0; x<out[1]; x++) { // for (int y=0; y<out[0]; y++) { // FLOAT *dst = feat + x*out[0] + y; FLOAT *dst = feat + x*out_0 + y; FLOAT *src, *p, n1, n2, n3, n4; // p = norm + (x+1)*blocks[0] + y+1; p = norm + (x+1)*blocks_0 + y+1; // n1 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n1 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); // p = norm + (x+1)*blocks[0] + y; p = norm + (x+1)*blocks_0 + y; // n2 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n2 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); // p = norm + x*blocks[0] + y+1; p = norm + x*blocks_0 + y+1; // n3 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n3 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); // p = norm + x*blocks[0] + y; p = norm + x*blocks_0 + y; // n4 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n4 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); FLOAT t1 = 0; FLOAT t2 = 0; FLOAT t3 = 0; FLOAT t4 = 0; /* contrast-sensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); src = hist + (x+1)*blocks_0 + (y+1); #pragma 
unroll 18 for (int o=0; o<18; o++) { FLOAT h1 = min_2(*src * n1); FLOAT h2 = min_2(*src * n2); FLOAT h3 = min_2(*src * n3); FLOAT h4 = min_2(*src * n4); *dst = 0.5 * (h1 + h2 + h3 + h4); t1 += h1; t2 += h2; t3 += h3; t4 += h4; // dst += out[0]*out[1]; dst += out_0*out_1; // src += blocks[0]*blocks[1]; src += blocks_0*blocks_1; } /* contrast-insensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); src = hist + (x+1)*blocks_0 + (y+1); #pragma unroll 9 for (int o=0; o<9; o++) { // FLOAT sum = *src + *(src + 9*blocks[0]*blocks[1]); FLOAT sum = *src + *(src + 9*blocks_0*blocks_1); FLOAT h1 = min_2(sum * n1); FLOAT h2 = min_2(sum * n2); FLOAT h3 = min_2(sum * n3); FLOAT h4 = min_2(sum * n4); *dst = 0.5 * (h1 + h2 + h3 + h4); // dst += out[0]*out[1]; dst += out_0*out_1; // src += blocks[0]*blocks[1]; src += blocks_0*blocks_1; } /* texture features */ *dst = 0.2357 * t1; // dst += out[0]*out[1]; dst += out_0*out_1; *dst = 0.2357 * t2; // dst += out[0]*out[1]; dst += out_0*out_1; *dst = 0.2357 * t3; // dst += out[0]*out[1]; dst += out_0*out_1; *dst = 0.2357 * t4; } // } //} /*************************************************************/ /* original source of compute features loop */ // /* compute featuers */ // for (int x=0; x<out[1]; x++) { // for (int y=0; y<out[0]; y++) { // FLOAT *dst = feat + x*out[0] + y; // FLOAT *src, *p, n1, n2, n3, n4; // p = norm + (x+1)*blocks[0] + y+1; // n1 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // p = norm + (x+1)*blocks[0] + y; // n2 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // p = norm + x*blocks[0] + y+1; // n3 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // p = norm + x*blocks[0] + y; // n4 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // FLOAT t1 = 0; // FLOAT t2 = 0; // FLOAT t3 = 0; // FLOAT t4 = 0; // /* contrast-sensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); // for (int o=0; o<18; o++) { // FLOAT h1 = min_2(*src * n1); // FLOAT h2 = min_2(*src * n2); // FLOAT h3 = min_2(*src * n3); // FLOAT h4 = min_2(*src * n4); // *dst = 0.5 * (h1 + h2 + h3 + h4); // t1 += h1; // t2 += h2; // t3 += h3; // t4 += h4; // dst += out[0]*out[1]; // src += blocks[0]*blocks[1]; // } // /* contrast-insensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); // for (int o=0; o<9; o++) { // FLOAT sum = *src + *(src + 9*blocks[0]*blocks[1]); // FLOAT h1 = min_2(sum * n1); // FLOAT h2 = min_2(sum * n2); // FLOAT h3 = min_2(sum * n3); // FLOAT h4 = min_2(sum * n4); // *dst = 0.5 * (h1 + h2 + h3 + h4); // dst += out[0]*out[1]; // src += blocks[0]*blocks[1]; // } // /* texture features */ // *dst = 0.2357 * t1; // dst += out[0]*out[1]; // *dst = 0.2357 * t2; // dst += out[0]*out[1]; // *dst = 0.2357 * t3; // dst += out[0]*out[1]; // *dst = 0.2357 * t4; // } // } /*************************************************************/ /*************************************************************/ } /* texture declaration for original image */ #ifdef USE_FLOAT_AS_DECIMAL texture<float, hipTextureType2DLayered, hipReadModeElementType> org_image; #else texture<uint2, hipTextureType2DLayered, hipReadModeElementType> org_image; #endif #ifndef USE_FLOAT_AS_DECIMAL #define NO_HARDWARE_SUPPORT #endif #ifdef NO_HARDWARE_SUPPORT __device__ static inline double getPixelVal(int x, int y, int width, int height, int channel) { int access_x = (x < 0) ? 0 : (x < width) ? x : (width-1); int access_y = (y < 0) ? 0 : (y < height) ? 
y : (height-1); int2 retval = tex1Dfetch(org_image, channel*height*width + access_y*width + access_x); return __hiloint2double(retval.y, retval.x); } #endif extern "C" __global__ void resize ( int src_height, int src_width, FLOAT *dst_top, int dst_height, int dst_width, FLOAT hfactor, FLOAT wfactor, int level ) { int dst_x = blockIdx.x*blockDim.x + threadIdx.x; int dst_y = blockIdx.y*blockDim.y + threadIdx.y; int channel = blockIdx.z; FLOAT *dst = dst_top + tex1Dfetch(image_idx_incrementer, level) + channel*dst_height*dst_width; // unsigned long long int dst_ptr = (unsigned long long int)dst_top + // (unsigned long long int)(tex1Dfetch(image_idx_incrementer, level) + channel*dst_height*dst_width)*sizeof(FLOAT); // FLOAT *dst = (FLOAT *)dst_ptr; FLOAT src_x_decimal = wfactor * dst_x + 0.5f; FLOAT src_y_decimal = hfactor * dst_y + 0.5f; #ifdef USE_FLOAT_AS_DECIMAL if (dst_x < dst_width && dst_y < dst_height) { dst[dst_y*dst_width + dst_x] = (FLOAT)tex2DLayered(org_image, src_x_decimal, src_y_decimal, channel); } #else /* if "double" type is used to express decimal value, there is no hardware support */ int src_x = (int)src_x_decimal; int src_y = (int)src_y_decimal; double color[4] = { getPixelVal(src_x, src_y, src_width, src_height, channel), getPixelVal(src_x+1, src_y, src_width, src_height, channel), getPixelVal(src_x, src_y+1, src_width, src_height, channel), getPixelVal(src_x+1, src_y+1, src_width, src_height, channel) }; double new_element = (src_x + 1 - src_x_decimal)*(src_y + 1 - src_y_decimal)*color[0] + (src_x_decimal - src_x)*(src_y + 1 - src_y_decimal)*color[1] + (src_x + 1 - src_x_decimal)*(src_y_decimal - src_y)*color[2] + (src_x_decimal - src_x)*(src_y_decimal - src_y)*color[3]; if (dst_x < dst_width && dst_y < dst_height) { dst[dst_y*dst_width + dst_x] = new_element; } #endif }
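/*
 * Illustrative host-side reference (not part of the original file): the resize
 * kernel above relies on tex2DLayered() hardware filtering in the float path and
 * on the manual bilinear blend (via getPixelVal) in the double path.  The sketch
 * below reproduces that bilinear blend on the CPU so kernel output can be
 * spot-checked.  It assumes a single-channel, row-major plane like the fallback
 * path; the helper names resize_reference_host and clamp_pixel are made up for
 * illustration.
 */
static inline double clamp_pixel(const double *src, int x, int y,
                                 int width, int height)
{
  /* clamp-to-edge addressing, mirroring getPixelVal() above */
  int ax = (x < 0) ? 0 : (x < width)  ? x : (width  - 1);
  int ay = (y < 0) ? 0 : (y < height) ? y : (height - 1);
  return src[ay*width + ax];
}

static void resize_reference_host(const double *src, int src_w, int src_h,
                                  double *dst, int dst_w, int dst_h,
                                  double wfactor, double hfactor)
{
  for (int dy = 0; dy < dst_h; dy++) {
    for (int dx = 0; dx < dst_w; dx++) {
      /* same coordinate mapping as the kernel: src = factor*dst + 0.5 */
      double sx = wfactor*dx + 0.5;
      double sy = hfactor*dy + 0.5;
      int x0 = (int)sx, y0 = (int)sy;
      double fx = sx - x0, fy = sy - y0;
      double c00 = clamp_pixel(src, x0,   y0,   src_w, src_h);
      double c10 = clamp_pixel(src, x0+1, y0,   src_w, src_h);
      double c01 = clamp_pixel(src, x0,   y0+1, src_w, src_h);
      double c11 = clamp_pixel(src, x0+1, y0+1, src_w, src_h);
      dst[dy*dst_w + dx] = (1.0-fx)*(1.0-fy)*c00 + fx*(1.0-fy)*c10
                         + (1.0-fx)*fy*c01      + fx*fy*c11;
    }
  }
}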
66dca0a85858b499cb817e6dcb9df25d240efa04.cu
#include <stdio.h> #include <math.h> #include "for_use_GPU.h" #include "calc_feature_conf.h" #include "switch_release.h" #include "switch_float.h" /* declaration of texture memory */ //texture<FLOAT> A; //texture<FLOAT> B; texture<float, cudaTextureType1D, cudaReadModeElementType> A; texture<float, cudaTextureType1D, cudaReadModeElementType> B; texture<int2, cudaTextureType1D, cudaReadModeElementType> A_double; texture<int2, cudaTextureType1D, cudaReadModeElementType> B_double; //thread process /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // convolve A and B(non_symmetric) //unsigned __stdcall process(void *thread_arg) { /********************************************/ /* function for calculating root */ /********************************************/ extern "C" __global__ void process_root ( //FLOAT *A, //FLOAT *B, FLOAT *C, int *A_dims_array, int *B_dims_array, int len, int interval, int L_MAX, int *error_array, int error_array_num, int pid, int device_number ) { int idx_x = blockIdx.x * blockDim.x + threadIdx.x; int idx_y = blockIdx.y * blockDim.y + threadIdx.y; int ii = blockIdx.z % len; int level = blockIdx.z / len; int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] }; int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] }; int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 }; int C_x = C_dims[1]/device_number; if(C_dims[1]%device_number != 0){ C_x++; } idx_x = idx_x + pid * C_x; if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){ return ; } if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && interval <= level && level < L_MAX ) { int num_features = A_dims[2]; const int A_SQ = A_dims[0]*A_dims[1]; const int B_SQ = B_dims[0]*B_dims[1]; FLOAT add_val = 0; int x = idx_x; int y = idx_y; int XA0 = A_dims[0]*x; /* apply loop condition */ for(int i=0; i<error_array_num; i++){ if(error_array[i] == level){ return; } } /* adjust the location of pointer of C */ FLOAT *dst; unsigned long long int pointer = (unsigned long long int)C; for(int a=interval; a<level; a++) { for(int b=0; b<len; b++) { int height = A_dims_array[a*3] - B_dims_array[b*3] + 1; int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if (height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } } for(int b=0; b<ii; b++){ int height = A_dims_array[level*3] - B_dims_array[b*3] + 1; int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if (height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } dst = (FLOAT *)pointer; /* adjust the location of pointer of A */ //unsigned long long int pointerA = (unsigned long long int)A; int A_index_ini = 0; for(int a=0; a<level; a++) { // pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]*sizeof(FLOAT)); A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]; } /* adjust the location of pointer of B */ //unsigned long long int pointerB = (unsigned long long int)B; int B_index_ini = 0; for(int b=0; b<ii; b++) { // pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT)); B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]; } for(int 
f = 0; f < num_features; f++) // num_features = 31 { // FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ; int A_index = A_index_ini + f*A_SQ; // FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ; int B_index = B_index_ini + f*B_SQ; // FLOAT *A_src2 =A_src+XA0; A_index += XA0; FLOAT val = 0; // FLOAT *A_off = A_src2+y; A_index += y; // FLOAT *B_off = B_src; for (int xp = 0; xp < B_dims[1]; xp++) { // FLOAT *A_temp = A_off; int A_index_tmp = A_index; // FLOAT *B_temp = B_off; int B_index_tmp = B_index; for (int yp = 0; yp < B_dims[0]; yp++) { // val += *(A_temp++) * *(B_temp++); if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision { FLOAT A_val = tex1Dfetch(A, A_index_tmp); FLOAT B_val = tex1Dfetch(B, B_index_tmp); val += A_val * B_val; } else { // if configured to use double precision int2 A_val = tex1Dfetch(A_double, A_index_tmp); int2 B_val = tex1Dfetch(B_double, B_index_tmp); val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x); } A_index_tmp++; B_index_tmp++; } // A_off+=A_dims[0]; A_index += A_dims[0]; // B_off+=B_dims[0]; B_index += B_dims[0]; } add_val += val; } *(dst + (idx_x*C_dims[0] + idx_y)) += add_val; } return; } /********************************************/ /* function for calculating part */ /********************************************/ extern "C" __global__ void process_part ( //FLOAT *A, //FLOAT *B, FLOAT *C, int *A_dims_array, int *B_dims_array, int len, int interval, int L_MAX, int *error_array, int error_array_num, int pid, int device_number ) { int idx_x = blockIdx.x * blockDim.x + threadIdx.x; int idx_y = blockIdx.y * blockDim.y + threadIdx.y; int ii = blockIdx.z % len; int level = blockIdx.z / len; int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] }; int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] }; int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 }; int C_x = C_dims[1]/device_number; if(C_dims[1]%device_number != 0){ C_x++; } idx_x = idx_x + pid * C_x; if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){ return ; } if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && 0 <= level && level < (L_MAX - interval) ) { int num_features = A_dims[2]; const int A_SQ = A_dims[0]*A_dims[1]; const int B_SQ = B_dims[0]*B_dims[1]; FLOAT add_val = 0; int x = idx_x; int y = idx_y; int XA0 = A_dims[0]*x; /* apply loop condition */ for(int i=0; i<error_array_num; i++){ if(error_array[i] == level) return; } /* adjust the location of pointer of C */ FLOAT *dst; unsigned long long int pointer = (unsigned long long int)C; for(int a=0; a<level; a++) { for(int b=0; b<len; b++){ int height = A_dims_array[a*3] - B_dims_array[b*3] + 1; int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if(height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } } for(int b=0; b<ii; b++){ int height = A_dims_array[level*3] - B_dims_array[b*3] + 1; int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1; /* error semantics */ if(height < 1 || width < 1){ printf("Invalid input in GPU\n"); return; } pointer += (unsigned long long int)(height*width*sizeof(FLOAT)); } dst = (FLOAT *)pointer; /* adjust the location of pointer of A */ // unsigned long long int pointerA = (unsigned long long int)A; int A_index_ini = 0; for(int a=0; a<level; a++) { // pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 
1]*A_dims_array[a*3 + 2]*sizeof(FLOAT)); A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]; } /* adjust the location of pointer of B */ // unsigned long long int pointerB = (unsigned long long int)B; int B_index_ini = 0; for(int b=0; b<ii; b++) { // pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT)); B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]; } for(int f = 0; f < num_features; f++) // num_features = 31 { // FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ; int A_index = A_index_ini + f*A_SQ; // FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ; int B_index = B_index_ini + f*B_SQ; // FLOAT *A_src2 =A_src+XA0; A_index += XA0; FLOAT val = 0; // FLOAT *A_off = A_src2+y; A_index += y; // FLOAT *B_off = B_src; for (int xp = 0; xp < B_dims[1]; xp++) { // FLOAT *A_temp = A_off; int A_index_tmp = A_index; // FLOAT *B_temp = B_off; int B_index_tmp = B_index; for (int yp = 0; yp < B_dims[0]; yp++) { // val += *(A_temp++) * *(B_temp++); if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision { FLOAT A_val = tex1Dfetch(A, A_index_tmp); FLOAT B_val = tex1Dfetch(B, B_index_tmp); val += A_val * B_val; } else // if configured to use double precision { int2 A_val = tex1Dfetch(A_double, A_index_tmp); int2 B_val = tex1Dfetch(B_double, B_index_tmp); val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x); } A_index_tmp++; B_index_tmp++; } // A_off+=A_dims[0]; A_index += A_dims[0]; // B_off+=B_dims[0]; B_index += B_dims[0]; } add_val += val; } *(dst + (idx_x*C_dims[0] + idx_y)) += add_val; } return; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" __global__ void inverse_Q( FLOAT *src_start, int *size_array, int *error_array, int error_array_num, int NoP, int *PIDX_array, int *numpart, int NoC, int max_numpart, int interval, int L_MAX, int pid, int device_number ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int kk = blockIdx.y * blockDim.y + threadIdx.y; int jj = threadIdx.z; int L = blockIdx.z; int numpart_jj; int C_y; if(0<=jj && jj<NoC) { numpart_jj = numpart[jj]; C_y = numpart_jj/device_number; if(numpart_jj%device_number != 0){ C_y++; } kk = kk + pid * C_y; if(kk < C_y * pid || kk >= C_y * (pid + 1)){ return ; } } else return ; if(0<=L && L < (L_MAX-interval)) { /* loop condition */ for(int h=0; h<error_array_num; h++) { if(L==error_array[h]){ return; } } if( 0<=kk && kk < numpart_jj ) { int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; int dim0 = size_array[L*NoP*2 + PIDX*2]; int dim1 = size_array[L*NoP*2 + PIDX*2+1]; if( idx < 0 || dim0*dim1 <= idx) return; /* pointer adjustment */ FLOAT *src; unsigned long long int ptr_adjuster = (unsigned long long int)src_start; for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoP; j++) { int height = size_array[i*NoP*2 + j*2]; int width = size_array[i*NoP*2 + j*2+1]; ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT)); } } for(int j=0; j<PIDX; j++) { int height = size_array[L*NoP*2 + j*2]; int width = size_array[L*NoP*2 + 
j*2+1]; ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT)); } src = (FLOAT *)ptr_adjuster; *(src + idx) *= -1; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // dt helper function __device__ void dt_helper(FLOAT *src, FLOAT *dst, int *ptr, int step, int s1, int s2, int d1, int d2, FLOAT a, FLOAT b) { if (d2 >= d1) { int d = (d1+d2) >> 1; int ds =d*step; int s = s1; FLOAT src_ss = *(src+s*step); for (int p = s1+1; p <= s2; p++) { int t1 = d-s; int t2 = d-p; if (src_ss + a*t1*t1 + b*t1 > *(src+p*step) + a*t2*t2 + b*t2) { s = p; src_ss = *(src+s*step); } } int D = d-s; dst[ds] = *(src+s*step) + a*D*D + b*D; ptr[ds] = s; dt_helper(src, dst, ptr, step, s1, s, d1, d-1, a, b); dt_helper(src, dst, ptr, step, s, s2, d+1, d2, a, b); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //sub function of dt extern "C" __global__ void dt1d_x( FLOAT *src_start, // part_C_dev FLOAT *dst_start, // tmpM_dev int *ptr_start, // tmpIy_dev int *DID_4_array, // DID_4_array_dev FLOAT *def_array, // def_array_dev int *size_array, // pm_size_array_dev int NoP, // NoP int *PIDX_array, // PIDX_array_dev int *error_array, // part_error_array_dev int error_array_num, // part_error_array_num int *numpart, // numpart_jj int NoC, // NoC int max_numpart, // max_numpart int interval, // interval int L_MAX, // L_MAX int pid, // pid int device_number // device_number ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int kk = blockIdx.y * blockDim.y + threadIdx.y; int jj = threadIdx.z; int L = blockIdx.z; int numpart_jj; int C_y; if(0<=jj && jj<NoC) { numpart_jj = numpart[jj]; C_y = numpart_jj/device_number; if(numpart_jj%device_number != 0){ C_y++; } kk = kk + pid * C_y; if(kk < C_y * pid || kk >= C_y * (pid + 1)){ return ; } } else{ return ; } if(0<=L && L<(L_MAX-interval)) { /* loop condition */ for(int h=0; h<error_array_num; h++) { if(L==error_array[h]){ return; } } if(0<=kk && kk<numpart_jj) { int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; int dim1 = size_array[L*NoP*2 + PIDX*2+1]; if( idx < 0 || dim1 <= idx ) return; int dim0 = size_array[L*NoP*2 + PIDX*2]; int XD=0; int step = 1; int n = dim0; int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; FLOAT a = def_array[DID_4+2]; FLOAT b = def_array[DID_4+3]; /* pointer adjustment */ unsigned long long int adj_src = (unsigned long long int)src_start; unsigned long long int adj_dst = (unsigned long long int)dst_start; unsigned long long int adj_ptr = (unsigned long long int)ptr_start; /* for src */ for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoP; j++) { int height = size_array[i*NoP*2 + j*2]; int width = size_array[i*NoP*2 + j*2+1]; adj_src += (unsigned long long int)(height*width*sizeof(FLOAT)); } } for(int j=0; j<PIDX; j++) { int height = size_array[L*NoP*2 + j*2]; int 
width = size_array[L*NoP*2 + j*2+1]; adj_src += (unsigned long long int)(height*width*sizeof(FLOAT)); } /* for dst, ptr */ // adjust "dst" to tmpM[L][jj][kk] // adjust "ptr" to tmpIy[L][jj][kk] for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoC; j++) { for(int k=0; k<numpart[j]; k++) { int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k]; int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2]; int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1]; adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } } for(int i=0; i<jj; i++) { for(int j=0; j<numpart[i]; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j] int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2] int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1] adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } for(int j=0; j<kk; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j]; // PIDX_array[L][jj][j] int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2] int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1] adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } FLOAT *src = (FLOAT *)adj_src; FLOAT *dst = (FLOAT *)adj_dst; int *ptr = (int *)adj_ptr; /* main calculation of di1d_x */ XD = idx*dim0; dt_helper(src+XD, dst+XD, ptr+XD, step, 0, n-1, 0, n-1, a, b); } } } extern "C" __global__ void dt1d_y( FLOAT *src_start, // tmpM_dev FLOAT *dst_start, // M_dev int *ptr_start, // tmpIx_dev int *DID_4_array, // DID_4_array_dev FLOAT *def_array, // def_array_dev int NoP, // NoP int *size_array, // pm_size_array_dev int *numpart, // numpart_jj int *PIDX_array, // PIDX_array_dev int NoC, // NoC int max_numpart, // max_numpart int interval, // interval int L_MAX, // L_MAX int *error_array, // part_error_array_dev int error_array_num, // part_error_array_num int pid, // pid int device_number // device_number ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int kk = blockIdx.y * blockDim.y + threadIdx.y; int jj = threadIdx.z; int L = blockIdx.z; int numpart_jj; int C_y; if(0<=jj && jj<NoC) { numpart_jj = numpart[jj]; C_y = numpart_jj/device_number; if(numpart_jj%device_number != 0){ C_y++; } kk = kk + pid * C_y; if(kk < C_y * pid || kk >= C_y * (pid + 1)){ return ; } } else{ return ; } if(0<=L && L<(L_MAX-interval)) { /* loop condition */ for(int h=0; h<error_array_num; h++) { if(L==error_array[h]){ return; } } if( 0<=kk && kk<numpart_jj) { int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; int dim0 = size_array[L*NoP*2 + PIDX*2]; if( idx < 0 || dim0 <= idx ) return; int dim1 = size_array[L*NoP*2 + PIDX*2+1]; int step = dim0; int n = dim1; int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk]; FLOAT a = def_array[DID_4]; // ax FLOAT b = def_array[DID_4+1]; // bx /* pointer adjustment */ unsigned long long int adj_src = (unsigned long long int)src_start; unsigned long long int adj_dst = (unsigned long long int)dst_start; unsigned long long int adj_ptr = (unsigned long long int)ptr_start; /* for src, dst, ptr */ /* 
adjust "src" to tmpM[L][jj][kk] */ /* adjust "dst" to M[L][jj][kk] */ /* adjust "ptr" to tmpIx[L][jj][kk] */ for(int i=0; i<L; i++) { /* apply error condition */ int error_flag=0; for(int h=0; h<error_array_num; h++) { if(i==error_array[h]){ error_flag = 1; } } if(error_flag != 0) { continue; } for(int j=0; j<NoC; j++) { for(int k=0; k<numpart[j]; k++) { int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k]; int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2]; int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1]; adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } } for(int i=0; i<jj; i++) { for(int j=0; j<numpart[i]; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j] int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2] int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1] adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } } for(int j=0; j<kk; j++) { int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j]; int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT)); adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int)); } FLOAT *src = (FLOAT *)adj_src; FLOAT *dst = (FLOAT *)adj_dst; int *ptr = (int *)adj_ptr; dt_helper(src+idx, dst+idx, ptr+idx, step, 0, n-1, 0, n-1, a, b); } } } /*************************************************************/ /*************************************************************/ /* original source of dt function loop */ // for (int x = 0; x < dims[1]; x++) // { // dt1d(vals+XD, tmpM+XD, tmpIy+XD, 1, dims[0], ay, by); // XD+=dims[0]; // } // for (int y = 0; y < dims[0]; y++) // { // dt1d(tmpM+y, M+y, tmpIx+y, dims[0], dims[1], ax, bx); // } /*************************************************************/ /*************************************************************/ extern "C" __global__ void calc_a_score( int IWID, int IHEI, FLOAT scale, int padx_n, int pady_n, int *RX_array, int *RY_array, FLOAT *ac_score, FLOAT *score_array, int *ssize_array, int NoC, int *size_score_array ) { int ii = blockIdx.x * blockDim.x + threadIdx.x; int jj = blockIdx.y * blockDim.y + threadIdx.y; int component_jj = threadIdx.z; if(0<=component_jj && component_jj < NoC) { unsigned long long int pointer_score = (unsigned long long int)score_array; unsigned long long int pointer_ssize = (unsigned long long int)ssize_array; unsigned long long int pointer_RX = (unsigned long long int)RX_array; unsigned long long int pointer_RY = (unsigned long long int)RY_array; for(int k=0; k<component_jj; k++) { pointer_score += (unsigned long long int)size_score_array[k]; pointer_ssize += (unsigned long long int)(sizeof(int)); pointer_RX += (unsigned long long int)(sizeof(int)); pointer_RY += (unsigned long long int)(sizeof(int)); } FLOAT *score = (FLOAT *)pointer_score; int ssize0 = *((int *)pointer_ssize); int ssize1 = *((int *)pointer_ssize + sizeof(int)); int RX = *((int *)pointer_RX); int RY = *((int *)pointer_RY); if(0<=ii && ii<IWID && 0<=jj && jj<IHEI) { 
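/* map the candidate image pixel (ii, jj) back into the cell coordinates
   (Xn, Yn) of this component's score map: divide by the level's scale and
   add the padding offsets (see the original loop reproduced below). */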
int Xn = (int)((FLOAT)ii/scale+padx_n); int Yn = (int)((FLOAT)jj/scale+pady_n); if(Yn<ssize0 && Xn<ssize1) { FLOAT sc = score[Yn+Xn*ssize0]; int Im_Y = jj+RY; int Im_X = ii+RX; if(Im_Y<IHEI && Im_X<IWID) { FLOAT *PP = ac_score+Im_Y+Im_X*IHEI; if(sc>*PP) *PP=sc; } } } } /*************************************************************/ /*************************************************************/ /* original source of calc_a_score loop */ // for(int ii=0;ii<IWID;ii++) // { // int Xn=(int)((FLOAT)ii/scale+padx_n); // for(int jj=0;jj<IHEI;jj++) // { // int Yn =(int)((FLOAT)jj/scale+pady_n); // if(Yn<ssize[0] && Xn<ssize[1]) // { // FLOAT sc = score[Yn+Xn*ssize[0]]; //get score of pixel // int Im_Y = jj+RY; // int Im_X = ii+RX; // if(Im_Y<IHEI && Im_X<IWID) // { // FLOAT *PP=ac_score+Im_Y+Im_X*IHEI; //consider root rectangle size // if(sc>*PP) *PP=sc; //save max score // } // } // } // } /*************************************************************/ /*************************************************************/ } __device__ static inline int min_i(int x, int y) {return (x <= y ? x : y);} #ifdef USE_FLOAT_AS_DECIMAL /************************************************/ /* atomic function dealing with float precision */ __device__ static inline float atomicAdd_float(float *address, float val) { return atomicAdd(address, val); // atomicAdd must be called from "__device__" function } /*************************************************/ #else /* ifdef USE_FLOAT_AS_DECIMAL */ /*************************************************/ /* atomic function dealing with double precision */ __device__ static inline double atomicAdd_double (double *address, double val) { unsigned long long int *address_as_ull = (unsigned long long int *)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); }while(assumed != old); return __longlong_as_double(old); } /*************************************************/ #endif /* ifdef USE_FLOAT_AS_DECIMAL */ /***********************************************************/ /* function which cast from int2 to unsigned long long int */ __device__ static inline unsigned long long int hiloint2uint64(int hi, int lo) { int combined[] = {hi, lo}; return *reinterpret_cast<unsigned long long int*>(combined); } /***********************************************************/ /* declaration of texture memory */ #ifdef USE_FLOAT_AS_DECIMAL texture<float, cudaTextureType1D, cudaReadModeElementType> resized_image; #else texture<uint2, cudaTextureType1D, cudaReadModeElementType> resized_image_double; #endif texture<int , cudaTextureType1D, cudaReadModeElementType> resized_image_size; texture<int, cudaTextureType1D, cudaReadModeElementType> image_idx_incrementer; texture<uint2, cudaTextureType1D, cudaReadModeElementType> hist_ptr_incrementer; texture<uint2, cudaTextureType1D, cudaReadModeElementType> norm_ptr_incrementer; texture<uint2, cudaTextureType1D, cudaReadModeElementType> feat_ptr_incrementer; extern "C" __global__ void calc_hist ( FLOAT *hist_top, int sbin, int visible_0, int visible_1, int level ) { /* index of each pixels */ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; const FLOAT Hcos[9] = {1.0000, 0.9397, 0.7660, 0.5000, 0.1736, -0.1736, -0.5000, -0.7660, -0.9397}; const FLOAT Hsin[9] = {0.0000, 0.3420, 0.6428, 0.8660, 0.9848, 0.9848, 0.8660, 0.6428, 0.3420}; /* adjust pointer position */ int base_index = 
tex1Dfetch(image_idx_incrementer, level); uint2 ptr_hist_uint2 = tex1Dfetch(hist_ptr_incrementer, level); unsigned long long int ptr_hist = (unsigned long long int)hist_top + hiloint2uint64(ptr_hist_uint2.x, ptr_hist_uint2.y); // convert uint2 -> unsigned long long int FLOAT *hist = (FLOAT *)ptr_hist; /* input size */ const int height = tex1Dfetch(resized_image_size, level*3); const int width = tex1Dfetch(resized_image_size, level*3 + 1); const int dims[2] = {height, width}; /* size of Histgrams and Norm calculation space */ const int blocks[2] = { (int)floor((double)height/(double)sbin+0.5), (int)floor((double)width/(double)sbin+0.5) }; // for (int x=1; x<visible[1]-1; x++) { // for (int y=1; y<visible[0]-1; y++) { if (1<=x && x<visible_1-1 && 1<=y && y<visible_0-1) { /* first color channel */ // base_index += min_i(x, dims[1]-2)*dims[0] + min_i(y, dims[0]-2); base_index += min_i(x, dims[1]-2) + min_i(y, dims[0]-2)*dims[1]; FLOAT dx, dy; #ifdef USE_FLOAT_AS_DECIMAL { // dy = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; // dx = tex1Dfetch(resized_image, base_index + dims[0]) - tex1Dfetch(resized_image, base_index - dims[0]) ; dx = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; dy = tex1Dfetch(resized_image, base_index + dims[1]) - tex1Dfetch(resized_image, base_index - dims[1]) ; } #else { int2 arg1 = tex1Dfetch(resized_image_double, base_index + 1); int2 arg2 = tex1Dfetch(resized_image_double, base_index - 1) ; // dy = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); dx = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); // arg1 = tex1Dfetch(resized_image_double, base_index + dims[0]); // arg2 = tex1Dfetch(resized_image_double, base_index - dims[0]); // dx = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); arg1 = tex1Dfetch(resized_image_double, base_index + dims[1]); arg2 = tex1Dfetch(resized_image_double, base_index - dims[1]); dy = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); } #endif FLOAT v = dx*dx + dy*dy; /* second color channel */ base_index += dims[0]*dims[1]; FLOAT dx2, dy2; #ifdef USE_FLOAT_AS_DECIMAL { // dy2 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; // dx2 = tex1Dfetch(resized_image, base_index + dims[0]) - tex1Dfetch(resized_image, base_index - dims[0]) ; dx2 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; dy2 = tex1Dfetch(resized_image, base_index + dims[1]) - tex1Dfetch(resized_image, base_index - dims[1]) ; } #else { int2 arg1 = tex1Dfetch(resized_image_double, base_index + 1); int2 arg2 = tex1Dfetch(resized_image_double, base_index - 1) ; // dy2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); dx2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); // arg1 = tex1Dfetch(resized_image_double, base_index + dims[0]); // arg2 = tex1Dfetch(resized_image_double, base_index - dims[0]); // dx2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); arg1 = tex1Dfetch(resized_image_double, base_index + dims[1]); arg2 = tex1Dfetch(resized_image_double, base_index - dims[1]); dy2 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); } #endif FLOAT v2 = dx2*dx2 + dy2*dy2; /* third color channel */ base_index += dims[0]*dims[1]; FLOAT dx3, dy3; #ifdef USE_FLOAT_AS_DECIMAL { // dy3 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index 
- 1) ; // dx3 = tex1Dfetch(resized_image, base_index + dims[0]) - tex1Dfetch(resized_image, base_index - dims[0]) ; dx3 = tex1Dfetch(resized_image, base_index + 1) - tex1Dfetch(resized_image, base_index - 1) ; dy3 = tex1Dfetch(resized_image, base_index + dims[1]) - tex1Dfetch(resized_image, base_index - dims[1]) ; } #else { int2 arg1 = tex1Dfetch(resized_image_double, base_index + 1); int2 arg2 = tex1Dfetch(resized_image_double, base_index - 1) ; // dy3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); dx3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); // arg1 = tex1Dfetch(resized_image_double, base_index + dims[0]); // arg2 = tex1Dfetch(resized_image_double, base_index - dims[0]); // dx3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); arg1 = tex1Dfetch(resized_image_double, base_index + dims[1]); arg2 = tex1Dfetch(resized_image_double, base_index - dims[1]); dy3 = __hiloint2double(arg1.y, arg1.x) - __hiloint2double(arg2.y, arg2.x); } #endif FLOAT v3 = dx3*dx3 + dy3*dy3; /* pick channel with strongest gradient */ // if (v2 > v) { // v = v2; // dx = dx2; // dy = dy2; // } dx = (v2 > v) ? dx2 : dx; dy = (v2 > v) ? dy2 : dy; v = (v2 > v) ? v2 : v; // if (v3 > v) { // v = v3; // dx = dx3; // dy = dy3; // } dx = (v3 > v) ? dx3 : dx; dy = (v3 > v) ? dy3 : dy; v = (v3 > v) ? v3 : v; /* snap to one of 18 orientations */ FLOAT best_dot = 0; int best_o = 0; #pragma unroll 9 for (int o=0; o<9; o++) { FLOAT dot = Hcos[o]*dx + Hsin[o]*dy; if (dot > best_dot) { best_dot = dot; best_o = o; } else if (-dot > best_dot) { best_dot = -dot; best_o = o + 9; } } /*add to 4 histgrams aroud pixel using linear interpolation*/ FLOAT xp = ((FLOAT)x+0.5)/(FLOAT)sbin - 0.5; FLOAT yp = ((FLOAT)y+0.5)/(FLOAT)sbin - 0.5; int ixp = (int)floor((double)xp); int iyp = (int)floor((double)yp); FLOAT vx0 = xp - ixp; FLOAT vy0 = yp - iyp; FLOAT vx1 = 1.0 - vx0; FLOAT vy1 = 1.0 - vy0; v = sqrt((double)v); #ifdef USE_FLOAT_AS_DECIMAL { /* dummy variable to reduce warp divergence */ // float retval = 0; if (ixp >= 0 && iyp >= 0) { atomicAdd_float((float *)(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx1*vy1*v); } // retval = (ixp >= 0 && iyp >= 0) ? // atomicAdd_float((float *)(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx1*vy1*v) : // 0; if (ixp+1 < blocks[1] && iyp >= 0) { atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx0*vy1*v); } // retval = (ixp+1 < blocks[1] && iyp >= 0) ? // atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (float)vx0*vy1*v) : // 0; if (ixp >= 0 && iyp+1 < blocks[0]) { atomicAdd_float((float *)(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx1*vy0*v); } // retval = (ixp >= 0 && iyp+1 < blocks[0]) ? // atomicAdd_float((float *)(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx1*vy0*v) : // 0; if (ixp+1 < blocks[1] && iyp+1 < blocks[0]) { atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx0*vy0*v); } // retval = (ixp+1 < blocks[1] && iyp+1 < blocks[0]) ? 
// atomicAdd_float((float *)(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (float)vx0*vy0*v) : // 0; } #else /* ifdef USE_FLOAT_AS_DECIMAL */ { if (ixp >= 0 && iyp >= 0) { atomicAdd_double((double *)(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (double)vx1*vy1*v); } if (ixp+1 < blocks[1] && iyp >= 0) { atomicAdd_double((double *)(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]), (double)vx0*vy1*v); } if (ixp >= 0 && iyp+1 < blocks[0]) { atomicAdd_double((double *)(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (double)vx1*vy0*v); } if (ixp+1 < blocks[1] && iyp+1 < blocks[0]) { atomicAdd_double((double *)(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]), (double)vx0*vy0*v); } } #endif /* ifdef USE_FLOAT_AS_DECIMAL */ } // } // } /*************************************************************/ /* original source of calc hist loop */ // for (int x=1; x<visible[1]-1; x++) { // for (int y=1; y<visible[0]-1; y++) { // /* first color channel */ // FLOAT *s = SRC + min_i(x, dims[1]-2)*dims[0] + min_i(y, dims[0]-2); // FLOAT dy = *(s+1) - *(s-1); // FLOAT dx = *(s+dims[0]) - *(s-dims[0]); // FLOAT v = dx*dx + dy*dy; // /* second color channel */ // s += dims[0]*dims[1]; // FLOAT dy2 = *(s+1) - *(s-1); // FLOAT dx2 = *(s+dims[0]) - *(s-dims[0]); // FLOAT v2 = dx2*dx2 + dy2*dy2; // /* third color channel */ // s += dims[0]*dims[1]; // FLOAT dy3 = *(s+1) - *(s-1); // FLOAT dx3 = *(s+dims[0]) - *(s-dims[0]); // FLOAT v3 = dx3*dx3 + dy3*dy3; // /* pick channel with strongest gradient */ // if (v2 > v) { // v = v2; // dx = dx2; // dy = dy2; // } // if (v3 > v) { // v = v3; // dx = dx3; // dy = dy3; // } // /* snap to one of 18 orientations */ // FLOAT best_dot = 0; // int best_o = 0; // for (int o=0; o<9; o++) { // FLOAT dot = Hcos[o]*dx + Hsin[o]*dy; // if (dot > best_dot) { // best_dot = dot; // best_o = o; // } // else if (-dot > best_dot) { // best_dot = -dot; // best_o = o + 9; // } // } // /*add to 4 histgrams aroud pixel using linear interpolation*/ // FLOAT xp = ((FLOAT)x+0.5)/(FLOAT)sbin - 0.5; // FLOAT yp = ((FLOAT)y+0.5)/(FLOAT)sbin - 0.5; // int ixp = (int)floor(xp); // int iyp = (int)floor(yp); // FLOAT vx0 = xp - ixp; // FLOAT vy0 = yp - iyp; // FLOAT vx1 = 1.0 - vx0; // FLOAT vy1 = 1.0 - vy0; // v = sqrt(v); // if (ixp >= 0 && iyp >= 0) { // *(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]) += vx1*vy1*v; // } // if (ixp+1 < blocks[1] && iyp >= 0) { // *(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]) += vx0*vy1*v; // } // if (ixp >= 0 && iyp+1 < blocks[0]) { // *(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]) += vx1*vy0*v; // } // if (ixp+1 < blocks[1] && iyp+1 < blocks[0]) { // *(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]) += vx0*vy0*v; // } // } // } /*************************************************************/ /*************************************************************/ } extern "C" __global__ void calc_norm ( FLOAT *hist_top, FLOAT *norm_top, int blocks_0, int blocks_1, int level ) { /* index of each element of norm */ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x<blocks_1 && y<blocks_0) { /* adjust pointer position */ uint2 ptr_uint2 = tex1Dfetch(hist_ptr_incrementer, level); unsigned long long int ptr_hist = (unsigned long long int)hist_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int ptr_uint2 = tex1Dfetch(norm_ptr_incrementer, level); unsigned long long 
int ptr_norm = (unsigned long long int)norm_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *dst = (FLOAT *)(ptr_norm + (x*blocks_0 + y)*sizeof(FLOAT)); FLOAT add_val = 0; #pragma unroll 9 for (int orient=0; orient<9; orient++) { FLOAT *src1 = (FLOAT *)(ptr_hist + (orient*blocks_0*blocks_1 + x*blocks_0 + y)*sizeof(FLOAT)); FLOAT *src2 = (FLOAT *)(ptr_hist + ((orient+9)*blocks_0*blocks_1 + x*blocks_0 + y)*sizeof(FLOAT)); add_val += (*src1 + *src2) * (*src1 + *src2); } *(dst) += add_val; } /*************************************************************/ /* original source of compute_energy loop */ // /* compute energy in each block by summing over orientations */ // for (int o=0; o<9; o++) { // FLOAT *src1 = hist + o*blocks[0]*blocks[1]; // FLOAT *src2 = hist + (o+9)*blocks[0]*blocks[1]; // FLOAT *dst = norm; // FLOAT *end = norm + blocks[0]*blocks[1]; // while(dst < end) { // *(dst++) += (*src1 + *src2) * (*src1 + *src2); // src1++; // src2++; // } // } /*************************************************************/ /*************************************************************/ } /* definition of constant */ #define EPS 0.0001 //return minimum number (FLOAT) __device__ static inline FLOAT min_2(FLOAT x) {return (x <= 0.2 ? x :0.2);} extern "C" __global__ void calc_feat ( FLOAT *hist_top, FLOAT *norm_top, FLOAT *feat_top, int out_0, int out_1, int blocks_0, int blocks_1, int level ) { /* index of each element of feat */ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; /* adjust pointer position */ uint2 ptr_uint2 = tex1Dfetch(hist_ptr_incrementer, level); unsigned long long int ptr_hist = (unsigned long long int)hist_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *hist = (FLOAT *)ptr_hist; ptr_uint2 = tex1Dfetch(norm_ptr_incrementer, level); unsigned long long int ptr_norm = (unsigned long long int)norm_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *norm = (FLOAT *)ptr_norm; ptr_uint2 = tex1Dfetch(feat_ptr_incrementer, level); unsigned long long int ptr_feat = (unsigned long long int)feat_top + hiloint2uint64(ptr_uint2.x, ptr_uint2.y); // convert uint2 -> unsigned long long int FLOAT *feat = (FLOAT *)ptr_feat; if (x<out_1 && y<out_0) { // for (int x=0; x<out[1]; x++) { // for (int y=0; y<out[0]; y++) { // FLOAT *dst = feat + x*out[0] + y; FLOAT *dst = feat + x*out_0 + y; FLOAT *src, *p, n1, n2, n3, n4; // p = norm + (x+1)*blocks[0] + y+1; p = norm + (x+1)*blocks_0 + y+1; // n1 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n1 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); // p = norm + (x+1)*blocks[0] + y; p = norm + (x+1)*blocks_0 + y; // n2 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n2 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); // p = norm + x*blocks[0] + y+1; p = norm + x*blocks_0 + y+1; // n3 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n3 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); // p = norm + x*blocks[0] + y; p = norm + x*blocks_0 + y; // n4 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + EPS); n4 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks_0) + *(p+blocks_0+1) + EPS); FLOAT t1 = 0; FLOAT t2 = 0; FLOAT t3 = 0; FLOAT t4 = 0; /* contrast-sensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); src = hist + (x+1)*blocks_0 + (y+1); #pragma 
unroll 18 for (int o=0; o<18; o++) { FLOAT h1 = min_2(*src * n1); FLOAT h2 = min_2(*src * n2); FLOAT h3 = min_2(*src * n3); FLOAT h4 = min_2(*src * n4); *dst = 0.5 * (h1 + h2 + h3 + h4); t1 += h1; t2 += h2; t3 += h3; t4 += h4; // dst += out[0]*out[1]; dst += out_0*out_1; // src += blocks[0]*blocks[1]; src += blocks_0*blocks_1; } /* contrast-insensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); src = hist + (x+1)*blocks_0 + (y+1); #pragma unroll 9 for (int o=0; o<9; o++) { // FLOAT sum = *src + *(src + 9*blocks[0]*blocks[1]); FLOAT sum = *src + *(src + 9*blocks_0*blocks_1); FLOAT h1 = min_2(sum * n1); FLOAT h2 = min_2(sum * n2); FLOAT h3 = min_2(sum * n3); FLOAT h4 = min_2(sum * n4); *dst = 0.5 * (h1 + h2 + h3 + h4); // dst += out[0]*out[1]; dst += out_0*out_1; // src += blocks[0]*blocks[1]; src += blocks_0*blocks_1; } /* texture features */ *dst = 0.2357 * t1; // dst += out[0]*out[1]; dst += out_0*out_1; *dst = 0.2357 * t2; // dst += out[0]*out[1]; dst += out_0*out_1; *dst = 0.2357 * t3; // dst += out[0]*out[1]; dst += out_0*out_1; *dst = 0.2357 * t4; } // } //} /*************************************************************/ /* original source of compute features loop */ // /* compute featuers */ // for (int x=0; x<out[1]; x++) { // for (int y=0; y<out[0]; y++) { // FLOAT *dst = feat + x*out[0] + y; // FLOAT *src, *p, n1, n2, n3, n4; // p = norm + (x+1)*blocks[0] + y+1; // n1 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // p = norm + (x+1)*blocks[0] + y; // n2 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // p = norm + x*blocks[0] + y+1; // n3 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // p = norm + x*blocks[0] + y; // n4 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); // FLOAT t1 = 0; // FLOAT t2 = 0; // FLOAT t3 = 0; // FLOAT t4 = 0; // /* contrast-sensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); // for (int o=0; o<18; o++) { // FLOAT h1 = min_2(*src * n1); // FLOAT h2 = min_2(*src * n2); // FLOAT h3 = min_2(*src * n3); // FLOAT h4 = min_2(*src * n4); // *dst = 0.5 * (h1 + h2 + h3 + h4); // t1 += h1; // t2 += h2; // t3 += h3; // t4 += h4; // dst += out[0]*out[1]; // src += blocks[0]*blocks[1]; // } // /* contrast-insensitive features */ // src = hist + (x+1)*blocks[0] + (y+1); // for (int o=0; o<9; o++) { // FLOAT sum = *src + *(src + 9*blocks[0]*blocks[1]); // FLOAT h1 = min_2(sum * n1); // FLOAT h2 = min_2(sum * n2); // FLOAT h3 = min_2(sum * n3); // FLOAT h4 = min_2(sum * n4); // *dst = 0.5 * (h1 + h2 + h3 + h4); // dst += out[0]*out[1]; // src += blocks[0]*blocks[1]; // } // /* texture features */ // *dst = 0.2357 * t1; // dst += out[0]*out[1]; // *dst = 0.2357 * t2; // dst += out[0]*out[1]; // *dst = 0.2357 * t3; // dst += out[0]*out[1]; // *dst = 0.2357 * t4; // } // } /*************************************************************/ /*************************************************************/ } /* texture declaration for original image */ #ifdef USE_FLOAT_AS_DECIMAL texture<float, cudaTextureType2DLayered, cudaReadModeElementType> org_image; #else texture<uint2, cudaTextureType2DLayered, cudaReadModeElementType> org_image; #endif #ifndef USE_FLOAT_AS_DECIMAL #define NO_HARDWARE_SUPPORT #endif #ifdef NO_HARDWARE_SUPPORT __device__ static inline double getPixelVal(int x, int y, int width, int height, int channel) { int access_x = (x < 0) ? 0 : (x < width) ? x : (width-1); int access_y = (y < 0) ? 0 : (y < height) ? 
y : (height-1); int2 retval = tex1Dfetch(org_image, channel*height*width + access_y*width + access_x); return __hiloint2double(retval.y, retval.x); } #endif extern "C" __global__ void resize ( int src_height, int src_width, FLOAT *dst_top, int dst_height, int dst_width, FLOAT hfactor, FLOAT wfactor, int level ) { int dst_x = blockIdx.x*blockDim.x + threadIdx.x; int dst_y = blockIdx.y*blockDim.y + threadIdx.y; int channel = blockIdx.z; FLOAT *dst = dst_top + tex1Dfetch(image_idx_incrementer, level) + channel*dst_height*dst_width; // unsigned long long int dst_ptr = (unsigned long long int)dst_top + // (unsigned long long int)(tex1Dfetch(image_idx_incrementer, level) + channel*dst_height*dst_width)*sizeof(FLOAT); // FLOAT *dst = (FLOAT *)dst_ptr; FLOAT src_x_decimal = wfactor * dst_x + 0.5f; FLOAT src_y_decimal = hfactor * dst_y + 0.5f; #ifdef USE_FLOAT_AS_DECIMAL if (dst_x < dst_width && dst_y < dst_height) { dst[dst_y*dst_width + dst_x] = (FLOAT)tex2DLayered(org_image, src_x_decimal, src_y_decimal, channel); } #else /* if "double" type is used to express decimal value, there is no hardware support */ int src_x = (int)src_x_decimal; int src_y = (int)src_y_decimal; double color[4] = { getPixelVal(src_x, src_y, src_width, src_height, channel), getPixelVal(src_x+1, src_y, src_width, src_height, channel), getPixelVal(src_x, src_y+1, src_width, src_height, channel), getPixelVal(src_x+1, src_y+1, src_width, src_height, channel) }; double new_element = (src_x + 1 - src_x_decimal)*(src_y + 1 - src_y_decimal)*color[0] + (src_x_decimal - src_x)*(src_y + 1 - src_y_decimal)*color[1] + (src_x + 1 - src_x_decimal)*(src_y_decimal - src_y)*color[2] + (src_x_decimal - src_x)*(src_y_decimal - src_y)*color[3]; if (dst_x < dst_width && dst_y < dst_height) { dst[dst_y*dst_width + dst_x] = new_element; } #endif }
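/*
 * Illustrative reference (not from the original file): dt_helper() above is the
 * divide-and-conquer form of the 1-D generalized distance transform applied by
 * dt1d_x / dt1d_y.  The brute-force version below computes the same quantity,
 *   dst[d] = min_s ( src[s*step] + a*(d-s)^2 + b*(d-s) ),   ptr[d] = argmin_s,
 * and is handy for unit-testing the kernels (e.g. compare against dt1d_x with
 * step = 1 and n = dims[0] on a single column).  The name dt1d_reference is made
 * up for illustration; FLOAT is the precision typedef from switch_float.h.
 */
static void dt1d_reference(const FLOAT *src, FLOAT *dst, int *ptr,
                           int step, int n, FLOAT a, FLOAT b)
{
  for (int d = 0; d < n; d++) {
    FLOAT best_val = src[0] + a*(FLOAT)(d*d) + b*(FLOAT)d;  /* s = 0 */
    int   best_s   = 0;
    for (int s = 1; s < n; s++) {
      int t = d - s;
      FLOAT val = src[s*step] + a*(FLOAT)(t*t) + b*(FLOAT)t;
      if (val < best_val) { best_val = val; best_s = s; }
    }
    dst[d*step] = best_val;
    ptr[d*step] = best_s;
  }
}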
98ff8129f90917541da8d354385f0c618c5a2c78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef VIENNACL_LINALG_CUDA_DIRECT_SOLVE_HPP #define VIENNACL_LINALG_CUDA_DIRECT_SOLVE_HPP /* ========================================================================= Copyright (c) 2010-2016, Institute for Microelectronics, Institute for Analysis and Scientific Computing, TU Wien. Portions of this software are copyright by UChicago Argonne, LLC. ----------------- ViennaCL - The Vienna Computing Library ----------------- Project Head: Karl Rupp [email protected] (A list of authors and contributors can be found in the manual) License: MIT (X11), see file LICENSE in the base directory ============================================================================= */ /** @file viennacl/linalg/cuda/direct_solve.hpp @brief Implementations of dense direct solvers using CUDA are found here. */ #include "viennacl/forwards.h" #include "viennacl/vector.hpp" #include "viennacl/matrix.hpp" #include "viennacl/linalg/cuda/common.cu" namespace viennacl { namespace linalg { namespace cuda { template<typename NumericT> __global__ void matrix_matrix_upper_solve_kernel( const NumericT * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, bool row_major_A, NumericT * B, unsigned int B_start1, unsigned int B_start2, unsigned int B_inc1, unsigned int B_inc2, unsigned int B_size1, unsigned int B_size2, unsigned int B_internal_size1, unsigned int B_internal_size2, bool row_major_B, bool unit_diagonal) { NumericT temp; NumericT entry_A; for (unsigned int row_cnt = 0; row_cnt < A_size1; ++row_cnt) { unsigned int row = A_size1 - 1 - row_cnt; if (!unit_diagonal) { __syncthreads(); if (threadIdx.x == 0) { if (row_major_B) B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] /= (row_major_A) ? A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; else //if (!row_major_B) B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] /= (row_major_A) ? 
A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; } } __syncthreads(); if (row_major_B) temp = B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)]; else //if (!row_major_B) temp = B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1]; //eliminate column of op(A) with index 'row' in parallel: " << std::endl; for (unsigned int elim = threadIdx.x; elim < row; elim += blockDim.x) { if (row_major_A) entry_A = A[(elim * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; else //if (!row_major_A) entry_A = A[(elim * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; if (row_major_B) B[(elim * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] -= temp * entry_A; else //if (!row_major_B) B[(elim * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] -= temp * entry_A; } } } template<typename NumericT> __global__ void matrix_matrix_lower_solve_kernel( const NumericT * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, bool row_major_A, NumericT * B, unsigned int B_start1, unsigned int B_start2, unsigned int B_inc1, unsigned int B_inc2, unsigned int B_size1, unsigned int B_size2, unsigned int B_internal_size1, unsigned int B_internal_size2, bool row_major_B, bool unit_diagonal) { NumericT temp; NumericT entry_A; for (unsigned int row = 0; row < A_size1; ++row) { if (!unit_diagonal) { __syncthreads(); if (threadIdx.x == 0) { if (row_major_B) B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] /= (row_major_A) ? A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; else //if (!row_major_B) B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] /= (row_major_A) ? 
A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; } } __syncthreads(); if (row_major_B) temp = B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)]; else //if (!row_major_B) temp = B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1]; //eliminate column of op(A) with index 'row' in parallel: " << std::endl; for (unsigned int elim = row + threadIdx.x + 1; elim < A_size1; elim += blockDim.x) { if (row_major_A) entry_A = A[(elim * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; else //if (!row_major_A) entry_A = A[(elim * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; if (row_major_B) B[(elim * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] -= temp * entry_A; else //if (!row_major_B) B[(elim * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] -= temp * entry_A; } } } namespace detail { template<typename TagT> bool is_unit_solve(TagT const & tag) { return false; } inline bool is_unit_solve(viennacl::linalg::unit_lower_tag) { return true; } inline bool is_unit_solve(viennacl::linalg::unit_upper_tag) { return true; } template<typename TagT> bool is_upper_solve(TagT const & tag) { return false; } inline bool is_upper_solve(viennacl::linalg::upper_tag) { return true; } inline bool is_upper_solve(viennacl::linalg::unit_upper_tag) { return true; } template<typename Matrix1T, typename Matrix2T, typename SolverTagT> void inplace_solve_impl(Matrix1T const & A, Matrix2T & B, SolverTagT const & tag) { typedef typename viennacl::result_of::cpu_value_type<Matrix1T>::type value_type; dim3 threads(128); dim3 grid(B.size2()); if (is_upper_solve(tag)) { hipLaunchKernelGGL(( matrix_matrix_upper_solve_kernel), dim3(grid),dim3(threads), 0, 0, viennacl::cuda_arg(A), static_cast<unsigned int>(viennacl::traits::start1(A)), static_cast<unsigned int>(viennacl::traits::start2(A)), static_cast<unsigned int>(viennacl::traits::stride1(A)), static_cast<unsigned int>(viennacl::traits::stride2(A)), static_cast<unsigned int>(viennacl::traits::size1(A)), static_cast<unsigned int>(viennacl::traits::size2(A)), static_cast<unsigned int>(viennacl::traits::internal_size1(A)), static_cast<unsigned int>(viennacl::traits::internal_size2(A)), bool(A.row_major()), viennacl::cuda_arg(B), static_cast<unsigned int>(viennacl::traits::start1(B)), static_cast<unsigned int>(viennacl::traits::start2(B)), static_cast<unsigned int>(viennacl::traits::stride1(B)), static_cast<unsigned int>(viennacl::traits::stride2(B)), static_cast<unsigned int>(viennacl::traits::size1(B)), static_cast<unsigned int>(viennacl::traits::size2(B)), static_cast<unsigned int>(viennacl::traits::internal_size1(B)), static_cast<unsigned int>(viennacl::traits::internal_size2(B)), bool(B.row_major()), is_unit_solve(tag) ); } else { hipLaunchKernelGGL(( matrix_matrix_lower_solve_kernel), dim3(grid),dim3(threads), 0, 0, viennacl::cuda_arg(A), static_cast<unsigned int>(viennacl::traits::start1(A)), static_cast<unsigned int>(viennacl::traits::start2(A)), static_cast<unsigned int>(viennacl::traits::stride1(A)), static_cast<unsigned int>(viennacl::traits::stride2(A)), static_cast<unsigned int>(viennacl::traits::size1(A)), static_cast<unsigned int>(viennacl::traits::size2(A)), static_cast<unsigned int>(viennacl::traits::internal_size1(A)), static_cast<unsigned int>(viennacl::traits::internal_size2(A)), bool(A.row_major()), 
viennacl::cuda_arg(B), static_cast<unsigned int>(viennacl::traits::start1(B)), static_cast<unsigned int>(viennacl::traits::start2(B)), static_cast<unsigned int>(viennacl::traits::stride1(B)), static_cast<unsigned int>(viennacl::traits::stride2(B)), static_cast<unsigned int>(viennacl::traits::size1(B)), static_cast<unsigned int>(viennacl::traits::size2(B)), static_cast<unsigned int>(viennacl::traits::internal_size1(B)), static_cast<unsigned int>(viennacl::traits::internal_size2(B)), bool(B.row_major()), is_unit_solve(tag) ); } } } // // Note: By convention, all size checks are performed in the calling frontend. No need to double-check here. // ////////////////// triangular solver ////////////////////////////////////// /** @brief Direct inplace solver for triangular systems with multiple right hand sides, i.e. A \ B (MATLAB notation). * * @param A The system matrix * @param B The matrix of row vectors, where the solution is directly written to * @param tag Solver tag for identifying the respective triangular solver */ template<typename NumericT, typename SolverTagT> void inplace_solve(matrix_base<NumericT> const & A, matrix_base<NumericT> & B, SolverTagT tag) { detail::inplace_solve_impl(A, B, tag); } // // Solve on vector // template<typename NumericT> __global__ void triangular_substitute_inplace_row_kernel( NumericT const * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, NumericT * v, unsigned int v_start, unsigned int v_inc, unsigned int v_size, unsigned int options) { NumericT temp; unsigned int unit_diagonal_flag = (options & (1 << 0)); unsigned int is_lower_solve = (options & (1 << 2)); unsigned int row; for (unsigned int rows_processed = 0; rows_processed < A_size1; ++rows_processed) //Note: A required to be square { row = is_lower_solve ? rows_processed : ((A_size1 - rows_processed) - 1); if (!unit_diagonal_flag) { __syncthreads(); if (threadIdx.x == 0) v[row * v_inc + v_start] /= A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; } __syncthreads(); temp = v[row * v_inc + v_start]; for (int elim = (is_lower_solve ? (row + threadIdx.x + 1) : threadIdx.x); elim < (is_lower_solve ? A_size1 : row); elim += blockDim.x) v[elim * v_inc + v_start] -= temp * A[(elim * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; } } template<typename NumericT> __global__ void triangular_substitute_inplace_col_kernel( NumericT const * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, NumericT * v, unsigned int v_start, unsigned int v_inc, unsigned int v_size, unsigned int options) { NumericT temp; unsigned int unit_diagonal_flag = (options & (1 << 0)); unsigned int is_lower_solve = (options & (1 << 2)); unsigned int row; for (unsigned int rows_processed = 0; rows_processed < A_size1; ++rows_processed) //Note: A required to be square { row = is_lower_solve ? rows_processed : ((A_size1 - rows_processed) - 1); if (!unit_diagonal_flag) { __syncthreads(); if (threadIdx.x == 0) v[row * v_inc + v_start] /= A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; } __syncthreads(); temp = v[row * v_inc + v_start]; for (int elim = (is_lower_solve ? (row + threadIdx.x + 1) : threadIdx.x); elim < (is_lower_solve ? 
A_size1 : row); elim += blockDim.x) v[elim * v_inc + v_start] -= temp * A[(elim * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; } } namespace detail { inline unsigned int get_option_for_solver_tag(viennacl::linalg::upper_tag) { return 0; } inline unsigned int get_option_for_solver_tag(viennacl::linalg::unit_upper_tag) { return (1 << 0); } inline unsigned int get_option_for_solver_tag(viennacl::linalg::lower_tag) { return (1 << 2); } inline unsigned int get_option_for_solver_tag(viennacl::linalg::unit_lower_tag) { return (1 << 2) | (1 << 0); } template<typename MatrixT, typename VectorT> void inplace_solve_vector_impl(MatrixT const & mat, VectorT & vec, unsigned int options) { typedef typename viennacl::result_of::cpu_value_type<MatrixT>::type value_type; if (mat.row_major()) { hipLaunchKernelGGL(( triangular_substitute_inplace_row_kernel), dim3(1), dim3(128), 0, 0, viennacl::cuda_arg(mat), static_cast<unsigned int>(viennacl::traits::start1(mat)), static_cast<unsigned int>(viennacl::traits::start2(mat)), static_cast<unsigned int>(viennacl::traits::stride1(mat)), static_cast<unsigned int>(viennacl::traits::stride2(mat)), static_cast<unsigned int>(viennacl::traits::size1(mat)), static_cast<unsigned int>(viennacl::traits::size2(mat)), static_cast<unsigned int>(viennacl::traits::internal_size1(mat)), static_cast<unsigned int>(viennacl::traits::internal_size2(mat)), viennacl::cuda_arg(vec), static_cast<unsigned int>(viennacl::traits::start(vec)), static_cast<unsigned int>(viennacl::traits::stride(vec)), static_cast<unsigned int>(viennacl::traits::size(vec)), options ); } else { hipLaunchKernelGGL(( triangular_substitute_inplace_col_kernel), dim3(1), dim3(128), 0, 0, viennacl::cuda_arg(mat), static_cast<unsigned int>(viennacl::traits::start1(mat)), static_cast<unsigned int>(viennacl::traits::start2(mat)), static_cast<unsigned int>(viennacl::traits::stride1(mat)), static_cast<unsigned int>(viennacl::traits::stride2(mat)), static_cast<unsigned int>(viennacl::traits::size1(mat)), static_cast<unsigned int>(viennacl::traits::size2(mat)), static_cast<unsigned int>(viennacl::traits::internal_size1(mat)), static_cast<unsigned int>(viennacl::traits::internal_size2(mat)), viennacl::cuda_arg(vec), static_cast<unsigned int>(viennacl::traits::start(vec)), static_cast<unsigned int>(viennacl::traits::stride(vec)), static_cast<unsigned int>(viennacl::traits::size(vec)), options ); } } } /** @brief Direct inplace solver for dense triangular systems (non-transposed version) * * @param mat The system matrix proxy * @param vec The load vector, where the solution is directly written to */ template<typename NumericT, typename SolverTagT> void inplace_solve(matrix_base<NumericT> const & mat, vector_base<NumericT> & vec, SolverTagT) { unsigned int options = detail::get_option_for_solver_tag(SolverTagT()); detail::inplace_solve_vector_impl(mat, vec, options); } } } } #endif
98ff8129f90917541da8d354385f0c618c5a2c78.cu
#ifndef VIENNACL_LINALG_CUDA_DIRECT_SOLVE_HPP #define VIENNACL_LINALG_CUDA_DIRECT_SOLVE_HPP /* ========================================================================= Copyright (c) 2010-2016, Institute for Microelectronics, Institute for Analysis and Scientific Computing, TU Wien. Portions of this software are copyright by UChicago Argonne, LLC. ----------------- ViennaCL - The Vienna Computing Library ----------------- Project Head: Karl Rupp [email protected] (A list of authors and contributors can be found in the manual) License: MIT (X11), see file LICENSE in the base directory ============================================================================= */ /** @file viennacl/linalg/cuda/direct_solve.hpp @brief Implementations of dense direct solvers using CUDA are found here. */ #include "viennacl/forwards.h" #include "viennacl/vector.hpp" #include "viennacl/matrix.hpp" #include "viennacl/linalg/cuda/common.cu" namespace viennacl { namespace linalg { namespace cuda { template<typename NumericT> __global__ void matrix_matrix_upper_solve_kernel( const NumericT * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, bool row_major_A, NumericT * B, unsigned int B_start1, unsigned int B_start2, unsigned int B_inc1, unsigned int B_inc2, unsigned int B_size1, unsigned int B_size2, unsigned int B_internal_size1, unsigned int B_internal_size2, bool row_major_B, bool unit_diagonal) { NumericT temp; NumericT entry_A; for (unsigned int row_cnt = 0; row_cnt < A_size1; ++row_cnt) { unsigned int row = A_size1 - 1 - row_cnt; if (!unit_diagonal) { __syncthreads(); if (threadIdx.x == 0) { if (row_major_B) B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] /= (row_major_A) ? A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; else //if (!row_major_B) B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] /= (row_major_A) ? 
A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; } } __syncthreads(); if (row_major_B) temp = B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)]; else //if (!row_major_B) temp = B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1]; //eliminate column of op(A) with index 'row' in parallel: " << std::endl; for (unsigned int elim = threadIdx.x; elim < row; elim += blockDim.x) { if (row_major_A) entry_A = A[(elim * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; else //if (!row_major_A) entry_A = A[(elim * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; if (row_major_B) B[(elim * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] -= temp * entry_A; else //if (!row_major_B) B[(elim * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] -= temp * entry_A; } } } template<typename NumericT> __global__ void matrix_matrix_lower_solve_kernel( const NumericT * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, bool row_major_A, NumericT * B, unsigned int B_start1, unsigned int B_start2, unsigned int B_inc1, unsigned int B_inc2, unsigned int B_size1, unsigned int B_size2, unsigned int B_internal_size1, unsigned int B_internal_size2, bool row_major_B, bool unit_diagonal) { NumericT temp; NumericT entry_A; for (unsigned int row = 0; row < A_size1; ++row) { if (!unit_diagonal) { __syncthreads(); if (threadIdx.x == 0) { if (row_major_B) B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] /= (row_major_A) ? A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; else //if (!row_major_B) B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] /= (row_major_A) ? 
A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)] : A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2)*A_internal_size1]; } } __syncthreads(); if (row_major_B) temp = B[(row * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)]; else //if (!row_major_B) temp = B[(row * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1]; //eliminate column of op(A) with index 'row' in parallel: " << std::endl; for (unsigned int elim = row + threadIdx.x + 1; elim < A_size1; elim += blockDim.x) { if (row_major_A) entry_A = A[(elim * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; else //if (!row_major_A) entry_A = A[(elim * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; if (row_major_B) B[(elim * B_inc1 + B_start1) * B_internal_size2 + (blockIdx.x * B_inc2 + B_start2)] -= temp * entry_A; else //if (!row_major_B) B[(elim * B_inc1 + B_start1) + (blockIdx.x * B_inc2 + B_start2) * B_internal_size1] -= temp * entry_A; } } } namespace detail { template<typename TagT> bool is_unit_solve(TagT const & tag) { return false; } inline bool is_unit_solve(viennacl::linalg::unit_lower_tag) { return true; } inline bool is_unit_solve(viennacl::linalg::unit_upper_tag) { return true; } template<typename TagT> bool is_upper_solve(TagT const & tag) { return false; } inline bool is_upper_solve(viennacl::linalg::upper_tag) { return true; } inline bool is_upper_solve(viennacl::linalg::unit_upper_tag) { return true; } template<typename Matrix1T, typename Matrix2T, typename SolverTagT> void inplace_solve_impl(Matrix1T const & A, Matrix2T & B, SolverTagT const & tag) { typedef typename viennacl::result_of::cpu_value_type<Matrix1T>::type value_type; dim3 threads(128); dim3 grid(B.size2()); if (is_upper_solve(tag)) { matrix_matrix_upper_solve_kernel<<<grid,threads>>>(viennacl::cuda_arg(A), static_cast<unsigned int>(viennacl::traits::start1(A)), static_cast<unsigned int>(viennacl::traits::start2(A)), static_cast<unsigned int>(viennacl::traits::stride1(A)), static_cast<unsigned int>(viennacl::traits::stride2(A)), static_cast<unsigned int>(viennacl::traits::size1(A)), static_cast<unsigned int>(viennacl::traits::size2(A)), static_cast<unsigned int>(viennacl::traits::internal_size1(A)), static_cast<unsigned int>(viennacl::traits::internal_size2(A)), bool(A.row_major()), viennacl::cuda_arg(B), static_cast<unsigned int>(viennacl::traits::start1(B)), static_cast<unsigned int>(viennacl::traits::start2(B)), static_cast<unsigned int>(viennacl::traits::stride1(B)), static_cast<unsigned int>(viennacl::traits::stride2(B)), static_cast<unsigned int>(viennacl::traits::size1(B)), static_cast<unsigned int>(viennacl::traits::size2(B)), static_cast<unsigned int>(viennacl::traits::internal_size1(B)), static_cast<unsigned int>(viennacl::traits::internal_size2(B)), bool(B.row_major()), is_unit_solve(tag) ); } else { matrix_matrix_lower_solve_kernel<<<grid,threads>>>(viennacl::cuda_arg(A), static_cast<unsigned int>(viennacl::traits::start1(A)), static_cast<unsigned int>(viennacl::traits::start2(A)), static_cast<unsigned int>(viennacl::traits::stride1(A)), static_cast<unsigned int>(viennacl::traits::stride2(A)), static_cast<unsigned int>(viennacl::traits::size1(A)), static_cast<unsigned int>(viennacl::traits::size2(A)), static_cast<unsigned int>(viennacl::traits::internal_size1(A)), static_cast<unsigned int>(viennacl::traits::internal_size2(A)), bool(A.row_major()), viennacl::cuda_arg(B), static_cast<unsigned int>(viennacl::traits::start1(B)), 
static_cast<unsigned int>(viennacl::traits::start2(B)), static_cast<unsigned int>(viennacl::traits::stride1(B)), static_cast<unsigned int>(viennacl::traits::stride2(B)), static_cast<unsigned int>(viennacl::traits::size1(B)), static_cast<unsigned int>(viennacl::traits::size2(B)), static_cast<unsigned int>(viennacl::traits::internal_size1(B)), static_cast<unsigned int>(viennacl::traits::internal_size2(B)), bool(B.row_major()), is_unit_solve(tag) ); } } } // // Note: By convention, all size checks are performed in the calling frontend. No need to double-check here. // ////////////////// triangular solver ////////////////////////////////////// /** @brief Direct inplace solver for triangular systems with multiple right hand sides, i.e. A \ B (MATLAB notation). * * @param A The system matrix * @param B The matrix of row vectors, where the solution is directly written to * @param tag Solver tag for identifying the respective triangular solver */ template<typename NumericT, typename SolverTagT> void inplace_solve(matrix_base<NumericT> const & A, matrix_base<NumericT> & B, SolverTagT tag) { detail::inplace_solve_impl(A, B, tag); } // // Solve on vector // template<typename NumericT> __global__ void triangular_substitute_inplace_row_kernel( NumericT const * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, NumericT * v, unsigned int v_start, unsigned int v_inc, unsigned int v_size, unsigned int options) { NumericT temp; unsigned int unit_diagonal_flag = (options & (1 << 0)); unsigned int is_lower_solve = (options & (1 << 2)); unsigned int row; for (unsigned int rows_processed = 0; rows_processed < A_size1; ++rows_processed) //Note: A required to be square { row = is_lower_solve ? rows_processed : ((A_size1 - rows_processed) - 1); if (!unit_diagonal_flag) { __syncthreads(); if (threadIdx.x == 0) v[row * v_inc + v_start] /= A[(row * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; } __syncthreads(); temp = v[row * v_inc + v_start]; for (int elim = (is_lower_solve ? (row + threadIdx.x + 1) : threadIdx.x); elim < (is_lower_solve ? A_size1 : row); elim += blockDim.x) v[elim * v_inc + v_start] -= temp * A[(elim * A_inc1 + A_start1) * A_internal_size2 + (row * A_inc2 + A_start2)]; } } template<typename NumericT> __global__ void triangular_substitute_inplace_col_kernel( NumericT const * A, unsigned int A_start1, unsigned int A_start2, unsigned int A_inc1, unsigned int A_inc2, unsigned int A_size1, unsigned int A_size2, unsigned int A_internal_size1, unsigned int A_internal_size2, NumericT * v, unsigned int v_start, unsigned int v_inc, unsigned int v_size, unsigned int options) { NumericT temp; unsigned int unit_diagonal_flag = (options & (1 << 0)); unsigned int is_lower_solve = (options & (1 << 2)); unsigned int row; for (unsigned int rows_processed = 0; rows_processed < A_size1; ++rows_processed) //Note: A required to be square { row = is_lower_solve ? rows_processed : ((A_size1 - rows_processed) - 1); if (!unit_diagonal_flag) { __syncthreads(); if (threadIdx.x == 0) v[row * v_inc + v_start] /= A[(row * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; } __syncthreads(); temp = v[row * v_inc + v_start]; for (int elim = (is_lower_solve ? (row + threadIdx.x + 1) : threadIdx.x); elim < (is_lower_solve ? 
A_size1 : row); elim += blockDim.x) v[elim * v_inc + v_start] -= temp * A[(elim * A_inc1 + A_start1) + (row * A_inc2 + A_start2) * A_internal_size1]; } } namespace detail { inline unsigned int get_option_for_solver_tag(viennacl::linalg::upper_tag) { return 0; } inline unsigned int get_option_for_solver_tag(viennacl::linalg::unit_upper_tag) { return (1 << 0); } inline unsigned int get_option_for_solver_tag(viennacl::linalg::lower_tag) { return (1 << 2); } inline unsigned int get_option_for_solver_tag(viennacl::linalg::unit_lower_tag) { return (1 << 2) | (1 << 0); } template<typename MatrixT, typename VectorT> void inplace_solve_vector_impl(MatrixT const & mat, VectorT & vec, unsigned int options) { typedef typename viennacl::result_of::cpu_value_type<MatrixT>::type value_type; if (mat.row_major()) { triangular_substitute_inplace_row_kernel<<<1, 128>>>(viennacl::cuda_arg(mat), static_cast<unsigned int>(viennacl::traits::start1(mat)), static_cast<unsigned int>(viennacl::traits::start2(mat)), static_cast<unsigned int>(viennacl::traits::stride1(mat)), static_cast<unsigned int>(viennacl::traits::stride2(mat)), static_cast<unsigned int>(viennacl::traits::size1(mat)), static_cast<unsigned int>(viennacl::traits::size2(mat)), static_cast<unsigned int>(viennacl::traits::internal_size1(mat)), static_cast<unsigned int>(viennacl::traits::internal_size2(mat)), viennacl::cuda_arg(vec), static_cast<unsigned int>(viennacl::traits::start(vec)), static_cast<unsigned int>(viennacl::traits::stride(vec)), static_cast<unsigned int>(viennacl::traits::size(vec)), options ); } else { triangular_substitute_inplace_col_kernel<<<1, 128>>>(viennacl::cuda_arg(mat), static_cast<unsigned int>(viennacl::traits::start1(mat)), static_cast<unsigned int>(viennacl::traits::start2(mat)), static_cast<unsigned int>(viennacl::traits::stride1(mat)), static_cast<unsigned int>(viennacl::traits::stride2(mat)), static_cast<unsigned int>(viennacl::traits::size1(mat)), static_cast<unsigned int>(viennacl::traits::size2(mat)), static_cast<unsigned int>(viennacl::traits::internal_size1(mat)), static_cast<unsigned int>(viennacl::traits::internal_size2(mat)), viennacl::cuda_arg(vec), static_cast<unsigned int>(viennacl::traits::start(vec)), static_cast<unsigned int>(viennacl::traits::stride(vec)), static_cast<unsigned int>(viennacl::traits::size(vec)), options ); } } } /** @brief Direct inplace solver for dense triangular systems (non-transposed version) * * @param mat The system matrix proxy * @param vec The load vector, where the solution is directly written to */ template<typename NumericT, typename SolverTagT> void inplace_solve(matrix_base<NumericT> const & mat, vector_base<NumericT> & vec, SolverTagT) { unsigned int options = detail::get_option_for_solver_tag(SolverTagT()); detail::inplace_solve_vector_impl(mat, vec, options); } } } } #endif
69a8bc2974a635b1c9a88b8030021dd9b9902ff4.hip
// !!! This is a file automatically generated by hipify!!! //pass //--blockDim=64 --gridDim=64 --no-inline #include <hip/hip_runtime.h> #include <stdio.h> #include <assert.h> #define DIM 2 //64 #define N 2// DIM*DIM typedef struct { float x,y,z,w; } myfloat4; __global__ void k(float * i0) { myfloat4 f4; f4.x = 2; i0[threadIdx.x + blockDim.x*blockIdx.x] = f4.x; } int main(){ float *a; float *dev_a; int size = N*sizeof(float); hipMalloc((void**)&dev_a, size); a = (float*)malloc(size); for (int i = 0; i < N; i++) a[i] = 5; hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice); //k<<<DIM,DIM>>>(dev_a); ESBMC_verify_kernel_f(k,1,N,dev_a); hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost); for (int i = 0; i < N; i++) { assert(a[i] == 2); } free(a); hipFree(dev_a); return 0; }
69a8bc2974a635b1c9a88b8030021dd9b9902ff4.cu
//pass //--blockDim=64 --gridDim=64 --no-inline #include <cuda.h> #include <stdio.h> #include <assert.h> #define DIM 2 //64 #define N 2// DIM*DIM typedef struct { float x,y,z,w; } myfloat4; __global__ void k(float * i0) { myfloat4 f4; f4.x = 2; i0[threadIdx.x + blockDim.x*blockIdx.x] = f4.x; } int main(){ float *a; float *dev_a; int size = N*sizeof(float); cudaMalloc((void**)&dev_a, size); a = (float*)malloc(size); for (int i = 0; i < N; i++) a[i] = 5; cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice); //k<<<DIM,DIM>>>(dev_a); ESBMC_verify_kernel_f(k,1,N,dev_a); cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost); for (int i = 0; i < N; i++) { assert(a[i] == 2); } free(a); cudaFree(dev_a); return 0; }
matMultGPU.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <iostream> using namespace std; hipEvent_t start, stop; void startKernelTime (void) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); } void stopKernelTime (void) { hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << milliseconds << " ms have elapsed for the CUDA execution" << endl; } void checkCUDAError (const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { cerr << "Cuda error: " << msg << ", " << hipGetErrorString( err) << endl; exit(-1); } } void printMatrix(int N, char c, float *M) { cout << c << endl; for (int i = 0; i < N; i++) { cout << endl; for (int j = 0; j < N; j++) { cout << M[i * N + j] << " "; } } cout << "\n\n"; } void fillMatrices(float *A, float *B, int N) { for (int i = 0; i < N; ++i) { for (int j = 0; j < N; ++j) { A[i*N+j] = ((float)rand()) / ((float)RAND_MAX); B[i*N+j] = 1; } } } __global__ void matMultKernel_ijk(int N, float *A, float *B, float *C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && j < N) { C[i * N + j] = 0; for (int k = 0; k < N; k++) { C[i * N + j] += A[i * N + k] * B[k * N + j]; } } } __global__ void matMultKernel_ikj(int N, float *A, float *B, float *C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && k < N/* && k >= 1 */) { for (int j = 0; j < N; j++) { atomicAdd(&C[i*N+j],A[i * N + k] * B[k * N + j]); // C[i * N + j] += A[i * N + k] * B[k * N + j]; } } } void matMultGPU(int N, float *A, float *B, float *C) { int tam = 16; dim3 blocksPerGrid(tam, tam); //Garantir que serão utilizados todos SMX dim3 threadsPerBlock((N+tam*tam-1)/(tam*tam), (N+tam*tam-1)/(tam*tam)); // declare variable with size of the array in bytes int bytes = N * N * sizeof(float); // pointers to the device memory float *dA, *dB, *dC; // allocate the memory on the device hipMalloc((void**) &dA, bytes); hipMalloc((void**) &dB, bytes); hipMalloc((void**) &dC, bytes); startKernelTime(); checkCUDAError("mem allocation"); // copy inputs to the device hipMemcpy(dA, A, bytes, hipMemcpyHostToDevice); hipMemcpy(dB, B, bytes, hipMemcpyHostToDevice); hipMemcpy(dC, C, bytes, hipMemcpyHostToDevice); /** Temporário **/ checkCUDAError("memcpy h->d"); // launch the kernel hipLaunchKernelGGL(( matMultKernel_ikj) , dim3(threadsPerBlock), dim3(blocksPerGrid) , 0, 0, N, dA, dB, dC); // matMultKernel_ijk <<< threadsPerBlock, blocksPerGrid >>> (N, dA, dB, dC); checkCUDAError("kernel invocation"); // copy the output to the host hipDeviceSynchronize(); hipMemcpy(C, dC, bytes, hipMemcpyDeviceToHost); checkCUDAError("memcpy d->h"); stopKernelTime(); // free the device memory hipFree(dA); hipFree(dB); hipFree(dC); checkCUDAError("mem free"); } int main(int argc, char const *argv[]) { if (argc == 1) { cout << "Insira o tamanho da matriz" << endl; exit(1); } int const N = atoi(argv[1]); float A[N * N]; float B[N * N]; float C[N * N]; fillMatrices(A, B, N); for (int i = 0; i < N * N; i++) { C[i] = 0; } matMultGPU(N, A, B, C); // printMatrix(N, 'A', A); // printMatrix(N, 'B', B); // printMatrix(N, 'C', C); }
matMultGPU.cu
#include <cstdlib> #include <iostream> using namespace std; cudaEvent_t start, stop; void startKernelTime (void) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); } void stopKernelTime (void) { cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << milliseconds << " ms have elapsed for the CUDA execution" << endl; } void checkCUDAError (const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { cerr << "Cuda error: " << msg << ", " << cudaGetErrorString( err) << endl; exit(-1); } } void printMatrix(int N, char c, float *M) { cout << c << endl; for (int i = 0; i < N; i++) { cout << endl; for (int j = 0; j < N; j++) { cout << M[i * N + j] << " "; } } cout << "\n\n"; } void fillMatrices(float *A, float *B, int N) { for (int i = 0; i < N; ++i) { for (int j = 0; j < N; ++j) { A[i*N+j] = ((float)rand()) / ((float)RAND_MAX); B[i*N+j] = 1; } } } __global__ void matMultKernel_ijk(int N, float *A, float *B, float *C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && j < N) { C[i * N + j] = 0; for (int k = 0; k < N; k++) { C[i * N + j] += A[i * N + k] * B[k * N + j]; } } } __global__ void matMultKernel_ikj(int N, float *A, float *B, float *C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && k < N/* && k >= 1 */) { for (int j = 0; j < N; j++) { atomicAdd(&C[i*N+j],A[i * N + k] * B[k * N + j]); // C[i * N + j] += A[i * N + k] * B[k * N + j]; } } } void matMultGPU(int N, float *A, float *B, float *C) { int tam = 16; dim3 blocksPerGrid(tam, tam); //Garantir que serão utilizados todos SMX dim3 threadsPerBlock((N+tam*tam-1)/(tam*tam), (N+tam*tam-1)/(tam*tam)); // declare variable with size of the array in bytes int bytes = N * N * sizeof(float); // pointers to the device memory float *dA, *dB, *dC; // allocate the memory on the device cudaMalloc((void**) &dA, bytes); cudaMalloc((void**) &dB, bytes); cudaMalloc((void**) &dC, bytes); startKernelTime(); checkCUDAError("mem allocation"); // copy inputs to the device cudaMemcpy(dA, A, bytes, cudaMemcpyHostToDevice); cudaMemcpy(dB, B, bytes, cudaMemcpyHostToDevice); cudaMemcpy(dC, C, bytes, cudaMemcpyHostToDevice); /** Temporário **/ checkCUDAError("memcpy h->d"); // launch the kernel matMultKernel_ikj <<< threadsPerBlock, blocksPerGrid >>> (N, dA, dB, dC); // matMultKernel_ijk <<< threadsPerBlock, blocksPerGrid >>> (N, dA, dB, dC); checkCUDAError("kernel invocation"); // copy the output to the host cudaThreadSynchronize(); cudaMemcpy(C, dC, bytes, cudaMemcpyDeviceToHost); checkCUDAError("memcpy d->h"); stopKernelTime(); // free the device memory cudaFree(dA); cudaFree(dB); cudaFree(dC); checkCUDAError("mem free"); } int main(int argc, char const *argv[]) { if (argc == 1) { cout << "Insira o tamanho da matriz" << endl; exit(1); } int const N = atoi(argv[1]); float A[N * N]; float B[N * N]; float C[N * N]; fillMatrices(A, B, N); for (int i = 0; i < N * N; i++) { C[i] = 0; } matMultGPU(N, A, B, C); // printMatrix(N, 'A', A); // printMatrix(N, 'B', B); // printMatrix(N, 'C', C); }
a52ebe05f730d698cf0db02738b1798ed2612e1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef block_size_x #define block_size_x 32 #endif #ifndef block_size_y #define block_size_y 16 #endif /** * This file contains the CUDA kernel for converting an image into * a grayscale array of floats. Scaling factors used are: * 0.299 r + 0.587 g + 0.114 b * * @author Ben van Werkhoven <[email protected]> * @version 0.1 */ extern "C" { // __global__ void grayscale(int h, int w, float* output, uchar3* input); __global__ void grayscale(int h, int w, float* output, char* input); } /* * Naive grayscale kernel * * Bytes go in, floats come out, alpha is ignored * * gridDim.x = w / block_size_x (ceiled) * gridDim.y = h / block_size_y (ceiled) */ //__global__ void grayscale(int h, int w, float* output, uchar3* input) { __global__ void grayscale(int h, int w, float* output, char* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; uchar3 *c3_input = (uchar3 *)input; if (j < w && i < h) { uchar3 c = c3_input[i*w+j]; // float b = (float) input[(i*w+j) * 3 + 0] & 0xFFFF; // float g = (float) input[(i*w+j) * 3 + 1] & 0xFFFF; // float r = (float) input[(i*w+j) * 3 + 2] & 0xFFFF; output[i*w+j] = 0.299f*c.z + 0.587f*c.y + 0.114f*c.x; } }
a52ebe05f730d698cf0db02738b1798ed2612e1c.cu
/* * Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef block_size_x #define block_size_x 32 #endif #ifndef block_size_y #define block_size_y 16 #endif /** * This file contains the CUDA kernel for converting an image into * a grayscale array of floats. Scaling factors used are: * 0.299 r + 0.587 g + 0.114 b * * @author Ben van Werkhoven <[email protected]> * @version 0.1 */ extern "C" { // __global__ void grayscale(int h, int w, float* output, uchar3* input); __global__ void grayscale(int h, int w, float* output, char* input); } /* * Naive grayscale kernel * * Bytes go in, floats come out, alpha is ignored * * gridDim.x = w / block_size_x (ceiled) * gridDim.y = h / block_size_y (ceiled) */ //__global__ void grayscale(int h, int w, float* output, uchar3* input) { __global__ void grayscale(int h, int w, float* output, char* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; uchar3 *c3_input = (uchar3 *)input; if (j < w && i < h) { uchar3 c = c3_input[i*w+j]; // float b = (float) input[(i*w+j) * 3 + 0] & 0xFFFF; // float g = (float) input[(i*w+j) * 3 + 1] & 0xFFFF; // float r = (float) input[(i*w+j) * 3 + 2] & 0xFFFF; output[i*w+j] = 0.299f*c.z + 0.587f*c.y + 0.114f*c.x; } }
2296d1419fd969c6251ec0fd1c190ab8dfb20121.hip
// !!! This is a file automatically generated by hipify!!! #include "THHHalf.h" #include "THHThrustAllocator.cuh" #include <thrust/transform.h> #include <thrust/execution_policy.h> struct __half2floatOp { __device__ float operator()(half v) { return __half2float(v); } }; struct __float2halfOp { __device__ half operator()(float v) { return __float2half(v); } }; void THCFloat2Half(THCState *state, half *out, float *in, ptrdiff_t len) { THCThrustAllocator thrustAlloc(state); thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __float2halfOp()); } void THCHalf2Float(THCState *state, float *out, half *in, ptrdiff_t len) { THCThrustAllocator thrustAlloc(state); thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __half2floatOp()); } THC_EXTERNC int THC_nativeHalfInstructions(THCState *state) { hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); // CC 5.3+ return (prop->major > 5 || (prop->major == 5 && prop->minor == 3)); } THC_EXTERNC int THC_fastHalfInstructions(THCState *state) { hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); // Check for CC 6.0 only (corresponds to P100) return (prop->major == 6 && prop->minor == 0); }
2296d1419fd969c6251ec0fd1c190ab8dfb20121.cu
#include "THCHalf.h" #include "THCThrustAllocator.cuh" #include <thrust/transform.h> #include <thrust/execution_policy.h> struct __half2floatOp { __device__ float operator()(half v) { return __half2float(v); } }; struct __float2halfOp { __device__ half operator()(float v) { return __float2half(v); } }; void THCFloat2Half(THCState *state, half *out, float *in, ptrdiff_t len) { THCThrustAllocator thrustAlloc(state); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __float2halfOp()); } void THCHalf2Float(THCState *state, float *out, half *in, ptrdiff_t len) { THCThrustAllocator thrustAlloc(state); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __half2floatOp()); } THC_EXTERNC int THC_nativeHalfInstructions(THCState *state) { cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state); // CC 5.3+ return (prop->major > 5 || (prop->major == 5 && prop->minor == 3)); } THC_EXTERNC int THC_fastHalfInstructions(THCState *state) { cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state); // Check for CC 6.0 only (corresponds to P100) return (prop->major == 6 && prop->minor == 0); }
d02a3af50ecca6f0a4164802898a540c1f5d080d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> using namespace std; // includes, project //#include <cutil.h> // includes, kernels #include "matrixmul_kernel.hip" // include helper header #include "tiledMatMult.h" //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); void ExtractFromPadded(Matrix M, const Matrix& Mpadded); bool CompareResults(float* A, float* B, int elements, float eps); int ReadFile(Matrix* M, char* file_name); bool ReadParams(int* params, int size, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); #define MAT_MAX_SIZE 4096 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; // Timing variables float cpu; hipEvent_t cpu_start, cpu_end; hipEventCreate(&cpu_start); hipEventCreate(&cpu_end); srand(52); if(argc != 5 && argc != 4 & argc != 3) { // Allocate and initialize the matrices int dummy; dummy = rand() % MAT_MAX_SIZE; int Mh = (dummy==0? 1: dummy); dummy = rand() % MAT_MAX_SIZE; int Mw = (dummy==0? 1: dummy); M = AllocateMatrix(Mh, Mw, 1); dummy = rand() % MAT_MAX_SIZE; int Nw = (dummy==0? 1: dummy); N = AllocateMatrix(Mw, Nw, 1); P = AllocateMatrix(Mh, Nw, 0); } else if (argc == 3) { int x = atoi(argv[1]); int y = atoi(argv[2]); M = AllocateMatrix(x, y, 1); N = AllocateMatrix(x, y, 1); P = AllocateMatrix(x, y, 0); } else { // Allocate and read in matrices from disk int* params = (int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; ReadParams(params, data_read, argv[1]); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); errorN = ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } // M * N on the device MatrixMulOnDevice(M, N, P); printf("GPU computation complete\n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); printf("Start CPU computation\n"); hipEventRecord(cpu_start, NULL); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); hipEventRecord(cpu_end, NULL); hipEventSynchronize(cpu_end); hipEventElapsedTime(&cpu, cpu_start, cpu_end); printf("CPU computation complete in %f ms\n", cpu); // in this case check if the result is equivalent to the expected soluion bool res = CompareResults(reference.elements, P.elements, P.height*P.width, 0.01f); printf("Test %s\n", (1 == res) ? 
"PASSED" : "FAILED"); printf("Dimension M[height,width]: %d %d\n", M.height, M.width); printf("Dimension N[height,width]: %d %d\n", N.height, N.width); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// // Multiply on the device //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix Munpadded, const Matrix Nunpadded, Matrix Punpadded) { // I'm going to take care of the padding here... Matrix M = PaddedMatrix(Munpadded, BLOCK_SIZE, 1); Matrix N = PaddedMatrix(Nunpadded, BLOCK_SIZE, 1); Matrix P = PaddedMatrix(Punpadded, BLOCK_SIZE, 0); // Timing variables float incl, excl; hipEvent_t incl_start, incl_end; hipEvent_t excl_start, excl_end; hipEventCreate(&incl_start); hipEventCreate(&incl_end); hipEventCreate(&excl_start); hipEventCreate(&excl_end); // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); Matrix Nd = AllocateDeviceMatrix(N); Matrix Pd = AllocateDeviceMatrix(P); // Setup the execution configuration // Come up with the number of blocks you need to call int bx = (Pd.width % BLOCK_SIZE == 0) ? Pd.width / BLOCK_SIZE : Pd.width / BLOCK_SIZE + 1; int by = (Pd.height % BLOCK_SIZE == 0) ? Pd.height / BLOCK_SIZE : Pd.height / BLOCK_SIZE + 1; dim3 grid(bx, by, 1); dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1); hipEventRecord(incl_start, NULL); CopyToDeviceMatrix(Md, M); CopyToDeviceMatrix(Nd, N); CopyToDeviceMatrix(Pd, Punpadded); // Clear memory // Launch the device computation threads hipEventRecord(excl_start, NULL); hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid), dim3(block), 0, 0, Md, Nd, Pd); hipEventRecord(excl_end, NULL); hipEventSynchronize(excl_end); hipEventElapsedTime(&excl, excl_start, excl_end); // Read P from the device and then extract the submatrix with the result CopyFromDeviceMatrix(P, Pd); hipEventRecord(incl_end, NULL); hipEventSynchronize(incl_end); hipEventElapsedTime(&incl, incl_start, incl_end); ExtractFromPadded(Punpadded, P); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); // Free the helper padded matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); printf("GPU Time:\n Exclusive = %f ms\n Inclusive = %f ms\n", excl, incl); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. 
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } //compare the data stored in two arrays on the host bool CompareResults(float* A, float* B, int elements, float eps) { for(unsigned int i = 0; i < elements; i++){ float error = A[i]-B[i]; if(error>eps){ return false; } } return true; } bool ReadParams(int* params, int size, char* file_name){ ifstream ifile(file_name); int i=0; for(int i=0; i<size; i++){ if(ifile.fail()==false){ ifile>>params[i]; } } return (i==size)? 1:0; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; std::ifstream ifile(file_name); unsigned int i = 0; for(; i < data_read; i++){ ifile>>M->elements[i]; } ifile.close(); return (i==data_read)? 0:1; } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { std::ofstream ofile(file_name); for(unsigned int i = 0; i < M.width*M.height; i++){ ofile<<M.elements[i]; } ofile.close(); } // Given a matrix M, produce a padded matrix that has both dimensions a // multiple of BLKSZ. The elements of the original M matrix can be // copied over to the new padded matrix provided the flag copyEntries // is not zero. Note that the assumption is that M.pitch <= M.width; Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries) { Matrix Mpadded; int dummy = (M.height - 1)/BLKSZ + 1; Mpadded.height = dummy*BLKSZ; dummy = (M.width - 1)/BLKSZ + 1; Mpadded.width = dummy*BLKSZ; Mpadded.pitch = M.width; Mpadded.elements = (float*) calloc(Mpadded.width*Mpadded.height, sizeof(float)); // copy entries of original matrix only if asked to if( copyEntries ) { for( int i=0; i<M.height; i++) { memcpy(&Mpadded.elements[i*Mpadded.width], &M.elements[i*M.width], M.width*sizeof(float)); } } return Mpadded; } // The submatrix of dimensions M.width by M.height of Mpadded is copied over // from Mpadded into M. Note that the assumption is that M.pitch <= M.width; void ExtractFromPadded(Matrix M, const Matrix& Mpadded) { if( Mpadded.pitch!=M.width ) { printf("Error extracting data from padded matrix: Number of rows %d, %d\n", Mpadded.pitch, M.width); exit(1); } if( Mpadded.height<M.height ) { printf("Error extracting data from padded matrix: Height too small%d, %d\n", Mpadded.height, M.height); exit(1); } for( int i=0; i<M.height; i++) { memcpy(&M.elements[i*M.width], &Mpadded.elements[i*Mpadded.width], M.width*sizeof(float)); } return; }
d02a3af50ecca6f0a4164802898a540c1f5d080d.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> using namespace std; // includes, project //#include <cutil.h> // includes, kernels #include "matrixmul_kernel.cu" // include helper header #include "tiledMatMult.h" //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); void ExtractFromPadded(Matrix M, const Matrix& Mpadded); bool CompareResults(float* A, float* B, int elements, float eps); int ReadFile(Matrix* M, char* file_name); bool ReadParams(int* params, int size, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); #define MAT_MAX_SIZE 4096 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; // Timing variables float cpu; cudaEvent_t cpu_start, cpu_end; cudaEventCreate(&cpu_start); cudaEventCreate(&cpu_end); srand(52); if(argc != 5 && argc != 4 & argc != 3) { // Allocate and initialize the matrices int dummy; dummy = rand() % MAT_MAX_SIZE; int Mh = (dummy==0? 
1: dummy); dummy = rand() % MAT_MAX_SIZE; int Mw = (dummy==0? 1: dummy); M = AllocateMatrix(Mh, Mw, 1); dummy = rand() % MAT_MAX_SIZE; int Nw = (dummy==0? 1: dummy); N = AllocateMatrix(Mw, Nw, 1); P = AllocateMatrix(Mh, Nw, 0); } else if (argc == 3) { int x = atoi(argv[1]); int y = atoi(argv[2]); M = AllocateMatrix(x, y, 1); N = AllocateMatrix(x, y, 1); P = AllocateMatrix(x, y, 0); } else { // Allocate and read in matrices from disk int* params = (int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; ReadParams(params, data_read, argv[1]); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); errorN = ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } // M * N on the device MatrixMulOnDevice(M, N, P); printf("GPU computation complete\n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); printf("Start CPU computation\n"); cudaEventRecord(cpu_start, NULL); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); cudaEventRecord(cpu_end, NULL); cudaEventSynchronize(cpu_end); cudaEventElapsedTime(&cpu, cpu_start, cpu_end); printf("CPU computation complete in %f ms\n", cpu); // in this case check if the result is equivalent to the expected soluion bool res = CompareResults(reference.elements, P.elements, P.height*P.width, 0.01f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("Dimension M[height,width]: %d %d\n", M.height, M.width); printf("Dimension N[height,width]: %d %d\n", N.height, N.width); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// // Multiply on the device //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix Munpadded, const Matrix Nunpadded, Matrix Punpadded) { // I'm going to take care of the padding here... Matrix M = PaddedMatrix(Munpadded, BLOCK_SIZE, 1); Matrix N = PaddedMatrix(Nunpadded, BLOCK_SIZE, 1); Matrix P = PaddedMatrix(Punpadded, BLOCK_SIZE, 0); // Timing variables float incl, excl; cudaEvent_t incl_start, incl_end; cudaEvent_t excl_start, excl_end; cudaEventCreate(&incl_start); cudaEventCreate(&incl_end); cudaEventCreate(&excl_start); cudaEventCreate(&excl_end); // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); Matrix Nd = AllocateDeviceMatrix(N); Matrix Pd = AllocateDeviceMatrix(P); // Setup the execution configuration // Come up with the number of blocks you need to call int bx = (Pd.width % BLOCK_SIZE == 0) ? Pd.width / BLOCK_SIZE : Pd.width / BLOCK_SIZE + 1; int by = (Pd.height % BLOCK_SIZE == 0) ? 
Pd.height / BLOCK_SIZE : Pd.height / BLOCK_SIZE + 1; dim3 grid(bx, by, 1); dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1); cudaEventRecord(incl_start, NULL); CopyToDeviceMatrix(Md, M); CopyToDeviceMatrix(Nd, N); CopyToDeviceMatrix(Pd, Punpadded); // Clear memory // Launch the device computation threads cudaEventRecord(excl_start, NULL); MatrixMulKernel<<<grid, block>>>(Md, Nd, Pd); cudaEventRecord(excl_end, NULL); cudaEventSynchronize(excl_end); cudaEventElapsedTime(&excl, excl_start, excl_end); // Read P from the device and then extract the submatrix with the result CopyFromDeviceMatrix(P, Pd); cudaEventRecord(incl_end, NULL); cudaEventSynchronize(incl_end); cudaEventElapsedTime(&incl, incl_start, incl_end); ExtractFromPadded(Punpadded, P); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); // Free the helper padded matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); printf("GPU Time:\n Exclusive = %f ms\n Inclusive = %f ms\n", excl, incl); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } //compare the data stored in two arrays on the host bool CompareResults(float* A, float* B, int elements, float eps) { for(unsigned int i = 0; i < elements; i++){ float error = A[i]-B[i]; if(error>eps){ return false; } } return true; } bool ReadParams(int* params, int size, char* file_name){ ifstream ifile(file_name); int i=0; for(int i=0; i<size; i++){ if(ifile.fail()==false){ ifile>>params[i]; } } return (i==size)? 1:0; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; std::ifstream ifile(file_name); unsigned int i = 0; for(; i < data_read; i++){ ifile>>M->elements[i]; } ifile.close(); return (i==data_read)? 
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
    std::ofstream ofile(file_name);
    for(unsigned int i = 0; i < M.width*M.height; i++){
        // separate the values so the file can be read back with operator>>
        ofile << M.elements[i] << " ";
    }
    ofile.close();
}

// Given a matrix M, produce a padded matrix that has both dimensions a
// multiple of BLKSZ. The elements of the original M matrix can be
// copied over to the new padded matrix provided the flag copyEntries
// is not zero. Note that the assumption is that M.pitch <= M.width;
Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries)
{
    Matrix Mpadded;
    int dummy = (M.height - 1)/BLKSZ + 1;
    Mpadded.height = dummy*BLKSZ;
    dummy = (M.width - 1)/BLKSZ + 1;
    Mpadded.width = dummy*BLKSZ;
    Mpadded.pitch = M.width;
    Mpadded.elements = (float*) calloc(Mpadded.width*Mpadded.height, sizeof(float));

    // copy entries of original matrix only if asked to
    if( copyEntries ) {
        for( int i=0; i<M.height; i++) {
            memcpy(&Mpadded.elements[i*Mpadded.width], &M.elements[i*M.width], M.width*sizeof(float));
        }
    }
    return Mpadded;
}

// The submatrix of dimensions M.width by M.height of Mpadded is copied over
// from Mpadded into M. Note that the assumption is that M.pitch <= M.width;
void ExtractFromPadded(Matrix M, const Matrix& Mpadded)
{
    if( Mpadded.pitch != M.width ) {
        printf("Error extracting data from padded matrix: pitch does not match width %d, %d\n", Mpadded.pitch, M.width);
        exit(1);
    }
    if( Mpadded.height < M.height ) {
        printf("Error extracting data from padded matrix: Height too small %d, %d\n", Mpadded.height, M.height);
        exit(1);
    }

    for( int i=0; i<M.height; i++) {
        memcpy(&M.elements[i*M.width], &Mpadded.elements[i*Mpadded.width], M.width*sizeof(float));
    }
    return;
}
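// The helpers above are meant to be used as a pad -> multiply -> extract round
// trip around the kernel launch. The short sketch below illustrates that flow;
// it relies only on the Matrix struct, BLOCK_SIZE, MatrixMulKernel and the
// helper functions already declared in this file, and is an illustrative
// sketch rather than part of the original assignment code.
void MatrixMulPaddedSketch(const Matrix& A, const Matrix& B, Matrix& C)
{
    // Pad every operand so both dimensions are multiples of BLOCK_SIZE.
    Matrix Ap = PaddedMatrix(A, BLOCK_SIZE, 1);   // copy entries
    Matrix Bp = PaddedMatrix(B, BLOCK_SIZE, 1);
    Matrix Cp = PaddedMatrix(C, BLOCK_SIZE, 0);   // zero-filled output

    Matrix Ad = AllocateDeviceMatrix(Ap);
    Matrix Bd = AllocateDeviceMatrix(Bp);
    Matrix Cd = AllocateDeviceMatrix(Cp);
    CopyToDeviceMatrix(Ad, Ap);
    CopyToDeviceMatrix(Bd, Bp);

    // Padded dimensions divide evenly by BLOCK_SIZE, so no ceil-division is needed.
    dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 grid(Cp.width / BLOCK_SIZE, Cp.height / BLOCK_SIZE, 1);
    MatrixMulKernel<<<grid, block>>>(Ad, Bd, Cd);

    // Copy the padded result back and extract the submatrix the caller asked for.
    CopyFromDeviceMatrix(Cp, Cd);
    ExtractFromPadded(C, Cp);

    FreeDeviceMatrix(&Ad); FreeDeviceMatrix(&Bd); FreeDeviceMatrix(&Cd);
    FreeMatrix(&Ap); FreeMatrix(&Bp); FreeMatrix(&Cp);
}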
f2995a4977d5bf38be8fce91fb8abd0aaa80e842.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "dali/operators/generic/one_hot.h" #include "dali/operators/generic/one_hot.cuh" namespace dali { class OneHotGPU : public OneHot<GPUBackend> { public: explicit OneHotGPU(const OpSpec &spec) : OneHot<GPUBackend>(spec) { scratch_mem_.set_type<uint8_t>(); } ~OneHotGPU() override = default; USE_OPERATOR_MEMBERS(); protected: void RunImpl(workspace_t<GPUBackend> &ws) override; bool SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) override; template<typename OutputType, typename InputType> void RunImplTyped(workspace_t<GPUBackend> &ws, int placement_axis); private: std::vector<detail::SampleDesc> sample_descs_; Tensor<GPUBackend> scratch_mem_; int recent_n_samples_ = 0; }; bool OneHotGPU::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) { const auto &input = ws.template Input<GPUBackend>(0); int num_samples = input.shape().num_samples(); if (num_samples != recent_n_samples_) { recent_n_samples_ = num_samples; int64_t samples_size = num_samples * sizeof(detail::SampleDesc); scratch_mem_.Resize({samples_size}); } sample_descs_.clear(); sample_descs_.reserve(num_samples); return OneHot<GPUBackend>::SetupImpl(output_desc, ws); } void OneHotGPU::RunImpl(workspace_t<GPUBackend> &ws) { const auto &input = ws.Input<GPUBackend>(0); auto &output = ws.Output<GPUBackend>(0); int output_sample_dim = output.shape().sample_dim(); int placement_axis = get_placement_axis(output_sample_dim); output.SetLayout(GetOutputLayout(ws, placement_axis, output_sample_dim)); TYPE_SWITCH(input.type(), type2id, InputType, ONE_HOT_TYPES, ( TYPE_SWITCH(output_type_, type2id, OutputType, ONE_HOT_TYPES, ( RunImplTyped<OutputType, InputType>(ws, placement_axis); ), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT ), DALI_FAIL(make_string("Unsupported input type: ", input.type())); ); // NOLINT } template <typename OutputType, typename InputType> void OneHotGPU::RunImplTyped(workspace_t<GPUBackend> &ws, int axis) { const auto &input = ws.Input<GPUBackend>(0); auto &output = ws.Output<GPUBackend>(0); int num_samples = input.shape().num_samples(); uint64_t max_out_vol = 1; const auto &shape = output.shape(); for (int sample_id = 0; sample_id < num_samples; ++sample_id) { detail::SampleDesc sample; auto output_shape = shape.tensor_shape_span(sample_id); auto outer_vol = volume(output_shape.begin(), output_shape.begin() + axis); sample.inner_vol = volume(output_shape.begin() + axis + 1, output_shape.end()); sample.inner_vol_classes = sample.inner_vol * num_classes_; sample.output_vol = outer_vol * sample.inner_vol_classes; sample.out = output.mutable_tensor<OutputType>(sample_id); sample.in = input.tensor<InputType>(sample_id); sample_descs_.push_back(sample); max_out_vol = ::max(max_out_vol, 
sample.output_vol); } auto stream = ws.stream(); scratch_mem_.Copy(sample_descs_, stream); const auto *scratch_mem_gpu = scratch_mem_.data<detail::SampleDesc>(); const int block = 256; auto grid = detail::gridHelper(max_out_vol, num_samples, block); hipLaunchKernelGGL(( detail::PopulateOneHot<OutputType, InputType>), dim3(grid), dim3(block), 0, stream, on_value_, off_value_, scratch_mem_gpu); } DALI_REGISTER_OPERATOR(OneHot, OneHotGPU, GPU); } // namespace dali
f2995a4977d5bf38be8fce91fb8abd0aaa80e842.cu
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "dali/operators/generic/one_hot.h" #include "dali/operators/generic/one_hot.cuh" namespace dali { class OneHotGPU : public OneHot<GPUBackend> { public: explicit OneHotGPU(const OpSpec &spec) : OneHot<GPUBackend>(spec) { scratch_mem_.set_type<uint8_t>(); } ~OneHotGPU() override = default; USE_OPERATOR_MEMBERS(); protected: void RunImpl(workspace_t<GPUBackend> &ws) override; bool SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) override; template<typename OutputType, typename InputType> void RunImplTyped(workspace_t<GPUBackend> &ws, int placement_axis); private: std::vector<detail::SampleDesc> sample_descs_; Tensor<GPUBackend> scratch_mem_; int recent_n_samples_ = 0; }; bool OneHotGPU::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) { const auto &input = ws.template Input<GPUBackend>(0); int num_samples = input.shape().num_samples(); if (num_samples != recent_n_samples_) { recent_n_samples_ = num_samples; int64_t samples_size = num_samples * sizeof(detail::SampleDesc); scratch_mem_.Resize({samples_size}); } sample_descs_.clear(); sample_descs_.reserve(num_samples); return OneHot<GPUBackend>::SetupImpl(output_desc, ws); } void OneHotGPU::RunImpl(workspace_t<GPUBackend> &ws) { const auto &input = ws.Input<GPUBackend>(0); auto &output = ws.Output<GPUBackend>(0); int output_sample_dim = output.shape().sample_dim(); int placement_axis = get_placement_axis(output_sample_dim); output.SetLayout(GetOutputLayout(ws, placement_axis, output_sample_dim)); TYPE_SWITCH(input.type(), type2id, InputType, ONE_HOT_TYPES, ( TYPE_SWITCH(output_type_, type2id, OutputType, ONE_HOT_TYPES, ( RunImplTyped<OutputType, InputType>(ws, placement_axis); ), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT ), DALI_FAIL(make_string("Unsupported input type: ", input.type())); ); // NOLINT } template <typename OutputType, typename InputType> void OneHotGPU::RunImplTyped(workspace_t<GPUBackend> &ws, int axis) { const auto &input = ws.Input<GPUBackend>(0); auto &output = ws.Output<GPUBackend>(0); int num_samples = input.shape().num_samples(); uint64_t max_out_vol = 1; const auto &shape = output.shape(); for (int sample_id = 0; sample_id < num_samples; ++sample_id) { detail::SampleDesc sample; auto output_shape = shape.tensor_shape_span(sample_id); auto outer_vol = volume(output_shape.begin(), output_shape.begin() + axis); sample.inner_vol = volume(output_shape.begin() + axis + 1, output_shape.end()); sample.inner_vol_classes = sample.inner_vol * num_classes_; sample.output_vol = outer_vol * sample.inner_vol_classes; sample.out = output.mutable_tensor<OutputType>(sample_id); sample.in = input.tensor<InputType>(sample_id); sample_descs_.push_back(sample); max_out_vol = std::max(max_out_vol, sample.output_vol); } auto stream = ws.stream(); scratch_mem_.Copy(sample_descs_, stream); 
const auto *scratch_mem_gpu = scratch_mem_.data<detail::SampleDesc>(); const int block = 256; auto grid = detail::gridHelper(max_out_vol, num_samples, block); detail::PopulateOneHot<OutputType, InputType><<<grid, block, 0, stream>>>( on_value_, off_value_, scratch_mem_gpu); } DALI_REGISTER_OPERATOR(OneHot, OneHotGPU, GPU); } // namespace dali
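// The operator above reduces one-hot placement to pure index arithmetic: the
// output element at flattened index outer * inner_vol_classes + cls * inner_vol
// + inner is on_value_ exactly when cls equals the input label at index
// outer * inner_vol + inner. A minimal standalone kernel illustrating the same
// arithmetic is sketched below; its names and types are illustrative only and
// are not part of DALI.
__global__ void one_hot_index_sketch(const int *labels, float *out,
                                     int64_t outer_vol, int64_t inner_vol,
                                     int num_classes, float on_value, float off_value) {
  int64_t out_vol = outer_vol * inner_vol * num_classes;
  for (int64_t idx = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
       idx < out_vol;
       idx += (int64_t)blockDim.x * gridDim.x) {
    int64_t outer = idx / (inner_vol * num_classes);
    int64_t cls = (idx / inner_vol) % num_classes;
    int64_t inner = idx % inner_vol;
    out[idx] = (cls == labels[outer * inner_vol + inner]) ? on_value : off_value;
  }
}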
db9c88175ae11b4d9633088beb15dde331e805e9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once

#include "pcuditas/gpu/gpu_array.cu"

template<class ParticleT, class EnvironmentT>
__global__
void update_forces_shared_kernel(
        ParticleT *particles, int n_particles,
        EnvironmentT *env_ptr) {

    extern __shared__ ParticleT particles_sh[];

    using vector_t = typename ParticleT::vector_type;
    EnvironmentT env = (*env_ptr);

    for (int i = blockIdx.x*blockDim.x + threadIdx.x;
         i < n_particles;
         i += blockDim.x*gridDim.x) {

        auto force = vector_t::zero();
        auto self_pos = particles[i].position;

        // For every other particle, one block-sized tile at a time
        for (int j=0; j<n_particles; j += blockDim.x) {

            // Copy the current tile to shared memory (guard the last,
            // possibly partial, tile)
            if (j + threadIdx.x < n_particles) {
                particles_sh[threadIdx.x] = particles[j + threadIdx.x];
            }
            __syncthreads();

            // Reduce on block: particles_sh[k] holds the particle with
            // global index j + k, so read the staged copy in shared memory
            for(size_t k=0; k<blockDim.x && j + k < n_particles; k++) {
                auto other_pos = particles_sh[k].position;
                auto dr = env.distance_vector(other_pos, self_pos);

                auto f_ij = (i != j + (int)k)
                    ? ParticleT::force_law(dr)
                    : vector_t::zero();

                force += f_ij;
            }
            __syncthreads();
        }

        // Save the results
        particles[i].force = force;
    }
}

template<class ParticleT, class EnvironmentT>
void update_forces_shared(
        gpu_array<ParticleT> &particles,
        gpu_object<EnvironmentT> &env,
        unsigned int block_size = 1024,
        unsigned int threads_per_block = 32) {

    // Launch the kernel. As you can see we are not copying memory from CPU to GPU
    // as you would normally do with hipMemcpy(), as we don't need to! The
    // vectors live in GPU already so we just need to know where they start (GPU
    // pointer) and pass it to the kernel.

    unsigned int shared_memory_size = threads_per_block * sizeof(ParticleT);
    hipLaunchKernelGGL(( update_forces_shared_kernel), dim3(block_size), dim3(threads_per_block), shared_memory_size, 0,
        particles.gpu_pointer(), particles.size,
        env.gpu_pointer()
    );
}
db9c88175ae11b4d9633088beb15dde331e805e9.cu
#pragma once

#include "pcuditas/gpu/gpu_array.cu"

template<class ParticleT, class EnvironmentT>
__global__
void update_forces_shared_kernel(
        ParticleT *particles, int n_particles,
        EnvironmentT *env_ptr) {

    extern __shared__ ParticleT particles_sh[];

    using vector_t = typename ParticleT::vector_type;
    EnvironmentT env = (*env_ptr);

    for (int i = blockIdx.x*blockDim.x + threadIdx.x;
         i < n_particles;
         i += blockDim.x*gridDim.x) {

        auto force = vector_t::zero();
        auto self_pos = particles[i].position;

        // For every other particle, one block-sized tile at a time
        for (int j=0; j<n_particles; j += blockDim.x) {

            // Copy the current tile to shared memory (guard the last,
            // possibly partial, tile)
            if (j + threadIdx.x < n_particles) {
                particles_sh[threadIdx.x] = particles[j + threadIdx.x];
            }
            __syncthreads();

            // Reduce on block: particles_sh[k] holds the particle with
            // global index j + k, so read the staged copy in shared memory
            for(size_t k=0; k<blockDim.x && j + k < n_particles; k++) {
                auto other_pos = particles_sh[k].position;
                auto dr = env.distance_vector(other_pos, self_pos);

                auto f_ij = (i != j + (int)k)
                    ? ParticleT::force_law(dr)
                    : vector_t::zero();

                force += f_ij;
            }
            __syncthreads();
        }

        // Save the results
        particles[i].force = force;
    }
}

template<class ParticleT, class EnvironmentT>
void update_forces_shared(
        gpu_array<ParticleT> &particles,
        gpu_object<EnvironmentT> &env,
        unsigned int block_size = 1024,
        unsigned int threads_per_block = 32) {

    // Launch the kernel. As you can see we are not copying memory from CPU to GPU
    // as you would normally do with cudaMemcpy(), as we don't need to! The
    // vectors live in GPU already so we just need to know where they start (GPU
    // pointer) and pass it to the kernel.

    unsigned int shared_memory_size = threads_per_block * sizeof(ParticleT);
    update_forces_shared_kernel<<<block_size, threads_per_block, shared_memory_size>>>(
        particles.gpu_pointer(), particles.size,
        env.gpu_pointer()
    );
}
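// The kernel above is an instance of the classic tiled n-body pattern: each
// block stages one tile of particles in shared memory, synchronizes, lets every
// thread accumulate interactions against the staged tile, and then moves on to
// the next tile. The standalone CUDA sketch below shows the same pattern with
// plain float3 positions instead of the pcuditas particle types; every name in
// it is illustrative only and not part of this library.
__global__ void tiled_accumulation_sketch(const float3 *pos, float3 *acc_out, int n) {
    extern __shared__ float3 tile[];

    int i = blockIdx.x*blockDim.x + threadIdx.x;
    float3 self = (i < n) ? pos[i] : make_float3(0.f, 0.f, 0.f);
    float3 acc = make_float3(0.f, 0.f, 0.f);

    for (int j = 0; j < n; j += blockDim.x) {
        // Stage one tile of positions in shared memory.
        if (j + threadIdx.x < n) {
            tile[threadIdx.x] = pos[j + threadIdx.x];
        }
        __syncthreads();

        // Accumulate a simple pairwise term against the staged tile; a real
        // force law would replace the displacement sum below.
        for (int k = 0; k < blockDim.x && j + k < n; ++k) {
            if (i < n && i != j + k) {
                acc.x += tile[k].x - self.x;
                acc.y += tile[k].y - self.y;
                acc.z += tile[k].z - self.z;
            }
        }
        __syncthreads();
    }

    if (i < n) {
        acc_out[i] = acc;
    }
}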
300fa87ddade014a602945fa1b50475abebe52c1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once

#include <stdio.h>
#include <stdexcept>
#include "poollayer.h"
#include "hip/hip_runtime.h"
#include "math.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>

__global__ void PoolLayer_Forward_cu(double *previousLayerForward, double *out, int* backwardData, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
    int index = blockIdx.x + (blockIdx.y * width) + (blockIdx.z * width * height);

    for (int y = 0; y < stride; y++)
    {
        for (int x = 0; x < stride; x++)
        {
            int previousLayerIndex = x + (blockIdx.x * stride) + (((blockIdx.y * stride) + y) * previousLayerWidth) + (blockIdx.z * previousLayerWidth * previousLayerHeight);
            double val = previousLayerForward[previousLayerIndex];
            if (val > out[index])
            {
                out[index] = val;
                backwardData[index] = previousLayerIndex;
            }
        }
    }
}

__global__ void PoolLayer_Backward_cu(double* nextlayerBackward, double *out, int* backwardData)
{
    int index = backwardData[blockIdx.x];
    out[index] += nextlayerBackward[blockIdx.x];
}

void PoolLayer_Forward(double *previousLayerForward, double *output, int* backwardData, int nodeCount, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
    // TODO: For simplicity just use a simple block calculation
    dim3 blocks(width, height, depth);

    // TODO: For simplicity just use one thread for now!
    hipLaunchKernelGGL(( PoolLayer_Forward_cu) , dim3(blocks), dim3(1) , 0, 0, previousLayerForward, output, backwardData, width, height, depth, stride, previousLayerWidth, previousLayerHeight, previousLayerDepth);

    LayerSynchronize();
}

void PoolLayer_Backward(double* nextlayerBackward, double *output, int* backwardData, int nodeCount)
{
    hipLaunchKernelGGL(( PoolLayer_Backward_cu) , dim3(nodeCount), dim3(1) , 0, 0, nextlayerBackward, output, backwardData);

    LayerSynchronize();
}
300fa87ddade014a602945fa1b50475abebe52c1.cu
#pragma once

#include <stdio.h>
#include <stdexcept>
#include "poollayer.h"
#include "cuda_runtime.h"
#include "math.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

__global__ void PoolLayer_Forward_cu(double *previousLayerForward, double *out, int* backwardData, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
    int index = blockIdx.x + (blockIdx.y * width) + (blockIdx.z * width * height);

    for (int y = 0; y < stride; y++)
    {
        for (int x = 0; x < stride; x++)
        {
            int previousLayerIndex = x + (blockIdx.x * stride) + (((blockIdx.y * stride) + y) * previousLayerWidth) + (blockIdx.z * previousLayerWidth * previousLayerHeight);
            double val = previousLayerForward[previousLayerIndex];
            if (val > out[index])
            {
                out[index] = val;
                backwardData[index] = previousLayerIndex;
            }
        }
    }
}

__global__ void PoolLayer_Backward_cu(double* nextlayerBackward, double *out, int* backwardData)
{
    int index = backwardData[blockIdx.x];
    out[index] += nextlayerBackward[blockIdx.x];
}

void PoolLayer_Forward(double *previousLayerForward, double *output, int* backwardData, int nodeCount, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
    // TODO: For simplicity just use a simple block calculation
    dim3 blocks(width, height, depth);

    // TODO: For simplicity just use one thread for now!
    PoolLayer_Forward_cu <<<blocks, 1 >>>(previousLayerForward, output, backwardData, width, height, depth, stride, previousLayerWidth, previousLayerHeight, previousLayerDepth);

    LayerSynchronize();
}

void PoolLayer_Backward(double* nextlayerBackward, double *output, int* backwardData, int nodeCount)
{
    PoolLayer_Backward_cu <<<nodeCount, 1 >>>(nextlayerBackward, output, backwardData);

    LayerSynchronize();
}
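// Usage sketch for the two entry points above: the forward pass writes, for
// every output cell, the maximum of its stride x stride window and the index of
// the winning input element, and the backward pass routes gradients back through
// exactly those indices. All buffer names and sizes below are hypothetical and
// assume width and height divide evenly by stride; d_input and d_gradFromNext
// are existing device buffers.
void PoolLayerRoundTripSketch(double *d_input, double *d_gradFromNext,
                              int inWidth, int inHeight, int depth, int stride)
{
    int outWidth = inWidth / stride;
    int outHeight = inHeight / stride;
    int nodeCount = outWidth * outHeight * depth;

    double *d_output; int *d_argmax; double *d_gradToPrev;
    cudaMalloc(&d_output, nodeCount * sizeof(double));
    cudaMalloc(&d_argmax, nodeCount * sizeof(int));
    cudaMalloc(&d_gradToPrev, inWidth * inHeight * depth * sizeof(double));

    // The forward kernel keeps a value only if it beats the current contents of
    // out, so the output is cleared first (sufficient for non-negative inputs).
    cudaMemset(d_output, 0, nodeCount * sizeof(double));
    cudaMemset(d_gradToPrev, 0, inWidth * inHeight * depth * sizeof(double));

    PoolLayer_Forward(d_input, d_output, d_argmax, nodeCount,
                      outWidth, outHeight, depth, stride,
                      inWidth, inHeight, depth);
    PoolLayer_Backward(d_gradFromNext, d_gradToPrev, d_argmax, nodeCount);

    cudaFree(d_output); cudaFree(d_argmax); cudaFree(d_gradToPrev);
}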
93dc6db535920d6349d52128e22262cd45892914.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h> #include <thrust/system/detail/generic/reduce_by_key.h> #include <thrust/scan.h> #include <thrust/remove.h> #include <thrust/iterator/transform_iterator.h> #include <thrust_wrapper.h> #include <error.h> #include <cutil.h> #include <util.h> #include <types.h> #include <misc.h> #include <hash_workspace.h> #include <matrix_io.h> #include <device_properties.h> #include <amgx_types/util.h> namespace amgx { namespace aggregation { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <sm_utils.inl> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 #include <hash_containers_sm70.inl> // Included inside the namespace to solve name collisions. static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 ) { int offset = -1; if ( utils::lane_id() == 0 ) { offset = atomicAdd( queue, count ); } return utils::shfl( offset, 0 ); } #else #include <hash_containers_sm35.inl> // Included inside the namespace to solve name collisions. static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 ) { int offset = -1; if ( utils::lane_id() == 0 ) { offset = atomicAdd( queue, count ); } return utils::shfl( offset, 0 ); } #endif /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool COUNT_ONLY > __global__ __launch_bounds__( CTA_SIZE ) void compute_sparsity_kernel( const int R_num_rows, // same as num_aggregates. 
const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *aggregates, int *Ac_rows, int *Ac_cols, int *Ac_pos, const int gmem_size, int *g_keys, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // Make sure we have to proceed. if ( COUNT_ONLY ) { volatile int *status = reinterpret_cast<volatile int *>( wk_status ); if ( set.has_failed() || *status != 0 ) { return; } } // Clear the set. set.clear(); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Iterate over the columns of R. for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end; // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { int local_k = k + lane_id_div_num_threads; // Is it an active thread. bool is_active_k = local_k < num_rows; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_a_row_id = utils::shfl( a_row_id, local_k ); // Load the range of the row of B. int a_col_it = 0, a_col_end = 0; if ( is_active_k ) { a_col_it = A_rows[uniform_a_row_id + 0]; a_col_end = A_rows[uniform_a_row_id + 1]; } // Iterate over the range of columns of B. for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW ) { int a_col_id = -1, a_agg_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; a_agg_id = aggregates[a_col_id]; } if ( HAS_DIAG && a_agg_id == r_row_id ) { a_agg_id = -1; } set.insert( a_agg_id, COUNT_ONLY ? wk_status : NULL ); } } } // Store the results. 
if ( COUNT_ONLY ) { int count = set.compute_size(); if ( lane_id == 0 ) { Ac_rows[r_row_id] = count; } } else { int ac_col_it = Ac_rows[r_row_id]; set.store_with_positions( &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] ); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 8 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_1x1( const int R_num_rows, const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, int gmem_size, int *g_keys, Value_type *g_vals, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The hash values stored in shared memory. __shared__ Value_type s_vals[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_map<int, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id * SMEM_SIZE ], &g_keys[r_row_id * gmem_size ], &s_vals[warp_id * SMEM_SIZE], &g_vals[r_row_id * gmem_size ], gmem_size ); // Loop over rows of A. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the output row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. TODO: Make sure it's needed. I don't think it is!!!! map.clear(); // Populate the map. map.load( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] ); int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // The diagonal. Value_type r_diag(types::util<Value_type>::get_zero()); // _iterate over the columns of A to build C_hat. for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } // Update the diagonal (if needed). if ( HAS_DIAG && is_active ) { r_diag = r_diag + A_vals[A_diag[a_row_id]]; } const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { int local_k = k + lane_id_div_num_threads; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_a_row_id = utils::shfl( a_row_id, local_k ); // The range of the row of B. 
int a_col_it = 0, a_col_end = 0; if ( local_k < num_rows ) { a_col_it = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 0] ); a_col_end = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 1] ); } // Iterate over the range of columns of B. for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW ) { // Load columns and values. int a_col_id = -1; Value_type a_value(types::util<Value_type>::get_zero()); if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; a_value = A_vals[a_col_it]; } // Find the aggregate. int a_agg_id = -1; if ( a_col_it < a_col_end ) { a_agg_id = aggregates[a_col_id]; } // Update the diag/hash map. if ( HAS_DIAG && a_agg_id == r_row_id ) { r_diag = r_diag + a_value; a_agg_id = -1; } map.insert( a_agg_id, a_value, NULL ); // It won't insert. Only update. } } } // Update the diagonal. if ( HAS_DIAG ) { r_diag = utils::warp_reduce<1, utils::Add>( r_diag ); if ( lane_id == 0 ) { Ac_vals[Ac_diag[r_row_id]] = r_diag; } } // Store the results. int count = ac_col_end - ac_col_it; if ( count == 0 ) { continue; } map.store( count, &Ac_vals[ac_col_it] ); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG > #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __global__ __launch_bounds__( CTA_SIZE, 8 ) #else __global__ __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_4x4( const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, const int gmem_size, int *g_keys, int *g_idx, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // Constants. const int lane_id_div_16 = lane_id / 16; const int lane_id_mod_16 = lane_id % 16; const int warp_offset = 16 * lane_id_div_16; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // My index. Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] ); // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. set.clear(true); // Populate the index. set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false ); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Diagonal value (each half warp stores a diagonal element). Value_type ac_diag(types::util<Value_type>::get_zero()); // Iterate over the columns of R. for ( r_col_it += lane_id_div_16 ; utils::any(r_col_it < r_col_end) ; r_col_it += 2 ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end; // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. 
int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } // Update the diagonal if needed. if ( HAS_DIAG && is_active ) { ac_diag = ac_diag + A_vals[16 * A_diag[a_row_id] + lane_id_mod_16]; } // Load the range of the row of A. int a_col_begin = 0, a_col_end = 0; if ( is_active ) { a_col_begin = A_rows[a_row_id + 0]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the range of columns of B. for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += 16 ) { int a_col_it = a_col_begin + lane_id_mod_16; // Each thread loads a column-ID and an aggregate. int a_col_id = -1, ac_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols [a_col_it]; ac_col_id = aggregates[a_col_id]; } // Each thread uses the hashed index to find the position associated with the aggregate. int key = ac_col_id; if ( HAS_DIAG && ac_col_id == r_row_id ) { key = -1; } int ac_idx = ac_col_it + set.find_index( key, index, false ); // Iterate over the 16 items. for ( int k = 0 ; k < 16 ; ++k ) { int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k ); int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k ); // Early loop exit. if ( utils::all( uniform_ac_col == -1 ) ) { break; } // The index of the item. const int uniform_a_col_it = a_col_begin + k; // Load the value if possible. Value_type a_value(types::util<Value_type>::get_zero()); if ( uniform_a_col_it < a_col_end ) { a_value = A_vals[16 * uniform_a_col_it + lane_id_mod_16]; } // Proceed diagonal if needed. if ( HAS_DIAG && uniform_ac_col == r_row_id ) { ac_diag = ac_diag + a_value; uniform_ac_col = -1; } // Get the id of the column computed by the other half warp. int other_ac_col = utils::shfl_xor( uniform_ac_col, 16 ); // If both half warps want to write to the same location, we have a conflict!!! int are_fighting = uniform_ac_col == other_ac_col; // Reduce the two values to a single one. if ( uniform_ac_col != -1 && are_fighting ) { a_value = a_value + utils::shfl_xor( a_value, 16 ); } // If the two half warps fight, only one can be the winner... It's the 1st half!!! int is_winner = !are_fighting || lane_id_div_16 == 0; // Update the value. if ( uniform_ac_col != -1 && is_winner ) { Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] = Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] + a_value; } } } } if ( HAS_DIAG ) { ac_diag = ac_diag + utils::shfl_xor( ac_diag, 16 ); if ( lane_id_div_16 == 0 ) { Ac_vals[16 * Ac_diag[r_row_id] + lane_id_mod_16] = ac_diag; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM > #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __global__ __launch_bounds__( CTA_SIZE, 8 ) #else __global__ __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_NxN( const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, const int gmem_size, int *g_keys, int *g_idx, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int T_WARP = FORCE_DETERMINISM ? 1 : WARP_SIZE / NxN; const int NUM_ITEMS_PER_WARP = T_WARP == 0 ? 1 : T_WARP; // The hash keys stored in shared memory. 
__shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; const int warp_offset = NxN * lane_id_div_NxN; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // My index. Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] ); // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. set.clear(true); // Populate the index. set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false ); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Diagonal value (each half warp stores a diagonal element). Value_type ac_diag(types::util<Value_type>::get_zero()); // Iterate over the columns of R. for ( r_col_it += lane_id_div_NxN ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP; // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } // Update the diagonal if needed. if ( HAS_DIAG && is_active ) { ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN]; } // Load the range of the row of A. int a_col_begin = 0, a_col_end = 0; if ( is_active ) { a_col_begin = A_rows[a_row_id + 0]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the range of columns of B. for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN ) { int a_col_it = a_col_begin + lane_id_mod_NxN; // Is it active. const bool is_active_k = a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP; // Each thread loads a column-ID and an aggregate. int a_col_id = -1, ac_col_id = -1; if ( is_active_k ) { a_col_id = A_cols [a_col_it]; ac_col_id = aggregates[a_col_id]; } // Each thread uses the hashed index to find the position associated with the aggregate. int key = ac_col_id; if ( HAS_DIAG && ac_col_id == r_row_id ) { key = -1; } int ac_idx = ac_col_it + set.find_index( key, index, false ); // Iterate over the NxN items. for ( int k = 0 ; k < NxN ; ++k ) { int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k ); int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k ); if ( lane_id_div_NxN >= NUM_ITEMS_PER_WARP ) { uniform_ac_col = -1; uniform_ac_idx = -1; } // Early loop exit. if ( utils::all( uniform_ac_col == -1 ) ) { break; } // The index of the item. const int uniform_a_col_it = a_col_begin + k; // Load the value if possible. Value_type a_value(types::util<Value_type>::get_zero()); if ( uniform_a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP ) { a_value = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } // Update the diagonal if it is a diagonal term. if ( HAS_DIAG && uniform_ac_col == r_row_id ) { ac_diag = ac_diag + a_value; uniform_ac_col = -1; } // Update the value. 
if ( uniform_ac_col != -1 ) { utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + lane_id_mod_NxN], a_value ); } } } } if ( HAS_DIAG ) { if ( !FORCE_DETERMINISM ) { ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag ); } if ( lane_id_div_NxN == 0 ) { Ac_vals[NxN * Ac_diag[r_row_id] + lane_id_mod_NxN] = ac_diag; } } } } // when blocksize is larger than warp size template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM, int NUM_BLOCK_ITERS_PER_WARP> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __global__ __launch_bounds__( CTA_SIZE, 8 ) #else __global__ __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_NxN_large( const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, const int gmem_size, int *g_keys, int *g_idx, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. Let's be chill here and take 1 per warp for large blocks const int NUM_ITEMS_PER_WARP = 1; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // My index. Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] ); // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. set.clear(true); // Populate the index. set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false ); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Diagonal value (each half warp stores a diagonal element). Value_type ac_diag(types::util<Value_type>::get_zero()); // Iterate over the columns of R. for ( ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP ) { // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. int a_row_id = R_cols[r_col_it]; // Update the diagonal if needed. if ( HAS_DIAG ) { ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id]; } // Load the range of the row of A. int a_col_begin = A_rows[a_row_id + 0]; int a_col_end = A_rows[a_row_id + 1]; // Iterate over the range of columns of B. for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN ) { int a_col_it = a_col_begin + lane_id; // Is it active. const bool is_active_k = a_col_it < a_col_end; // Each thread loads a column-ID and an aggregate. int a_col_id = -1, ac_col_id = -1; if ( is_active_k ) { a_col_id = A_cols [a_col_it]; ac_col_id = aggregates[a_col_id]; } // Each thread uses the hashed index to find the position associated with the aggregate. int key = ac_col_id; if ( HAS_DIAG && ac_col_id == r_row_id ) { key = -1; } int ac_idx = ac_col_it + set.find_index( key, index, false ); // Iterate over the NxN items. 
for ( int k = 0 ; k < NxN ; ++k ) { int uniform_ac_col = utils::shfl( ac_col_id, k ); int uniform_ac_idx = utils::shfl( ac_idx, k ); // Early loop exit. if ( utils::all( uniform_ac_col == -1 ) ) { break; } // The index of the item. const int uniform_a_col_it = a_col_begin + k; // iterate through the block #pragma unroll for (int i = 0; i < NUM_BLOCK_ITERS_PER_WARP; i++) { // Load the value if possible. Value_type a_value(types::util<Value_type>::get_zero()); if ( uniform_a_col_it < a_col_end && (WARP_SIZE * i + lane_id) < NxN ) { a_value = A_vals[NxN * uniform_a_col_it + WARP_SIZE * i + lane_id]; } // Update the diagonal if it is a diagonal term. if ( HAS_DIAG && uniform_ac_col == r_row_id ) { ac_diag = ac_diag + a_value; uniform_ac_col = -1; } // Update the value. if ( uniform_ac_col != -1 && (WARP_SIZE * i + lane_id) < NxN) { utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + WARP_SIZE * i + lane_id], a_value ); } } } } } if ( HAS_DIAG ) { if ( !FORCE_DETERMINISM ) { ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag ); } Ac_vals[NxN * Ac_diag[r_row_id] + lane_id] = ac_diag; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// enum { WARP_SIZE = 32, SMEM_SIZE = 128 }; template< int CTA_SIZE, bool HAS_DIAG, bool COUNT_ONLY, typename Workspace > static void compute_sparsity_dispatch( Workspace &hash_wk, const int R_num_rows, const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *aggregates, int *Ac_rows, int *Ac_cols, int *Ac_pos ) { hipDeviceProp_t props = getDeviceProperties(); int GRID_SIZE = (props.major >= 7) ? 1024 : 128; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; int *h_status; thrust::global_thread_handle::hipHostMalloc((void **) &h_status, sizeof(int)); int *h_work_offset; thrust::global_thread_handle::hipHostMalloc((void **) &h_work_offset, sizeof(int)); int attempt = 0; bool warning_printed = 0; for ( bool done = false ; !done && attempt < 10 ; ++attempt ) { // Double the amount of GMEM (if needed). if ( attempt > 0 ) { if (!warning_printed) { amgx_printf("WARNING: Used settings might result in degraded performance for the MG coarsener for this matrix.\n"); amgx_printf("WARNING: You might want to try different selector or MG algorithm for better performance.\n"); warning_printed = 1; } hash_wk.expand(); } // Reset the status. int *p_status = h_status; *p_status = 0; hipMemcpyAsync( hash_wk.get_status(), p_status, sizeof(int), hipMemcpyHostToDevice, thrust::global_thread_handle::get_stream() ); cudaCheckError(); // Reset the work queue. int *p_work_offset = h_work_offset; *p_work_offset = GRID_SIZE * NUM_WARPS; hipMemcpyAsync( hash_wk.get_work_queue(), p_work_offset, sizeof(int), hipMemcpyHostToDevice, thrust::global_thread_handle::get_stream() ); cudaCheckError(); // Launch the kernel. hipLaunchKernelGGL(( compute_sparsity_kernel<8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, HAS_DIAG, COUNT_ONLY>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, thrust::global_thread_handle::get_stream(), R_num_rows, R_rows, R_cols, A_rows, A_cols, aggregates, Ac_rows, Ac_cols, Ac_pos, hash_wk.get_gmem_size(), hash_wk.get_keys(), hash_wk.get_work_queue(), hash_wk.get_status() ); cudaCheckError(); // Read the result from count_non_zeroes. 
hipMemcpyAsync( p_status, hash_wk.get_status(), sizeof(int), hipMemcpyDeviceToHost, thrust::global_thread_handle::get_stream() ); hipStreamSynchronize(thrust::global_thread_handle::get_stream()); done = (*p_status == 0); cudaCheckError(); } thrust::global_thread_handle::hipHostFree(h_status); thrust::global_thread_handle::hipHostFree(h_work_offset); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int CTA_SIZE, bool HAS_DIAG, typename Workspace, typename Value_type > static void fill_A_dispatch( Workspace &hash_wk, const int block_size, const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, bool force_determinism ) { hipDeviceProp_t props = getDeviceProperties(); int GRID_SIZE = (props.major >= 7) ? 1024 : 128; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; int work_offset = GRID_SIZE * NUM_WARPS; hipMemcpyAsync( hash_wk.get_work_queue(), &work_offset, sizeof(int), hipMemcpyHostToDevice, thrust::global_thread_handle::get_stream() ); cudaCheckError(); // Launch the kernel. switch ( block_size ) { case 1: hipLaunchKernelGGL(( fill_A_kernel_1x1<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), hash_wk.get_vals(), hash_wk.get_work_queue() ); break; case 2: hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 2, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 3: hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 3, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 4: if ( force_determinism ) hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 4, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, true>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); else hipLaunchKernelGGL(( fill_A_kernel_4x4<Value_type, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, thrust::global_thread_handle::get_stream(), R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 5: hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 5, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, 
hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 8: hipLaunchKernelGGL(( fill_A_kernel_NxN_large<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false, 2>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 10: hipLaunchKernelGGL(( fill_A_kernel_NxN_large<Value_type, 10, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false, 4>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; default: FatalError( "LOW_DEG not implemented for this block size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void LowDegCoarseAGenerator<TemplateConfig<AMGX_device, V, M, I> >::computeAOperator( const Matrix_d &A, Matrix_d &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates ) { if ( A.get_block_dimx() != A.get_block_dimy() ) { FatalError( "LowDegCoarseAGenerator implemented for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } // The matrix Ac will be modified. Ac.set_initialized(0); // Is the diagonal stored separately?? const int diag_prop = A.hasProps(DIAG); // Allocate a workspace for hashing. typedef TemplateConfig<AMGX_device, V, M, I> TConfig_d; hipDeviceProp_t props = getDeviceProperties(); int grid_size = (props.major >= 7) ? 1024 : 128; Hash_Workspace<TConfig_d, int> hash_wk(true, grid_size); // Compute row offsets of Ac. Ac.addProps(CSR); Ac.set_num_rows( num_aggregates ); Ac.set_num_cols( num_aggregates ); Ac.row_offsets.resize( num_aggregates + 1 ); // Compute the number of non-zero elements per row of Ac. const int CTA_SIZE = 128; if ( diag_prop ) compute_sparsity_dispatch<CTA_SIZE, true, true>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), NULL, NULL ); else compute_sparsity_dispatch<CTA_SIZE, false, true>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), NULL, NULL ); cudaCheckError(); // Compute the number of non-zeroes. thrust_wrapper::exclusive_scan( Ac.row_offsets.begin(), Ac.row_offsets.end(), Ac.row_offsets.begin() ); cudaCheckError(); int nonzero_blocks = Ac.row_offsets[num_aggregates]; if ( diag_prop ) { Ac.addProps(DIAG); } if ( A.is_matrix_singleGPU() ) { Ac.resize( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), !diag_prop ); } else { //have 3% more nz for storage Ac.resize_spare( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), 1.0 ); if ( diag_prop ) { Ac.computeDiagonal(); } } // Vector to store the positions in the hash table. device_vector_alloc<int> Ac_pos(nonzero_blocks); // Compute the sparsity pattern of the rows of Ac. 
if ( diag_prop ) compute_sparsity_dispatch<CTA_SIZE, true, false>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() )); else compute_sparsity_dispatch<CTA_SIZE, false, false>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() )); cudaCheckError(); // Reset values if needed. if ( A.get_block_dimy() != 1 ) { thrust::fill( Ac.values.begin(), Ac.values.end(), types::util<ValueType>::get_zero() ); cudaCheckError(); } // Compute values. if ( diag_prop ) { fill_A_dispatch<CTA_SIZE, true>( hash_wk, A.get_block_dimy(), num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() ), Ac.diag.raw(), Ac.values.raw(), this->m_force_determinism ); } else { fill_A_dispatch<CTA_SIZE, false>( hash_wk, A.get_block_dimy(), num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() ), Ac.diag.raw(), Ac.values.raw(), this->m_force_determinism ); } cudaCheckError(); // Update the diagonal if needed. if ( Ac.is_matrix_singleGPU() ) { Ac.computeDiagonal(); } cudaCheckError(); // Finalize the modification. Ac.set_initialized(1); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void LowDegCoarseAGenerator<TemplateConfig<AMGX_host, V, M, I> >::computeAOperator( const Matrix_h &h_A, Matrix_h &h_Ac, const IVector &h_aggregates, const IVector &h_R_row_offsets, const IVector &h_R_column_indices, const int num_aggregates ) { h_Ac.set_initialized(0); IVector rows; IVector inds; typename Matrix_h::MVector vals; typename Matrix_h::MVector diag; int num_nnz = 0; int diag_prop = h_A.hasProps(DIAG); for ( int row = 0; row < num_aggregates; row++ ) { for ( int col = 0; col < num_aggregates; col++ ) { int fill = 0; typename Matrix_h::MVector cur(h_A.get_block_size(), types::util<typename Matrix_h::value_type>::get_zero()); for ( int rc = h_R_row_offsets[row]; rc < h_R_row_offsets[row + 1]; rc++ ) { int j = h_R_column_indices[rc]; for ( int ac = h_A.row_offsets[j]; ac < h_A.row_offsets[j + 1] + diag_prop; ac++ ) { int k = (ac == h_A.row_offsets[j + 1]) ? j : h_A.col_indices[ac]; for ( int q = h_R_row_offsets[col]; q < h_R_row_offsets[col + 1]; q++ ) if ( k == h_R_column_indices[q] ) { fill = 1; int val_idx = (ac == h_A.row_offsets[j + 1]) ? 
h_A.get_num_nz() + j : ac; for ( int v = 0; v < h_A.get_block_size(); v++) { cur[v] = cur[v] + h_A.values[val_idx * h_A.get_block_size() + v]; } } } } if ( fill ) { if ( row != col || !diag_prop ) { inds.push_back(col); rows.push_back(row); num_nnz++; for ( int v = 0; v < h_A.get_block_size(); v++ ) { vals.push_back(cur[v]); } } else { for ( int v = 0; v < h_A.get_block_size(); v++ ) { diag.push_back(cur[v]); } } } } } rows.push_back(-1); // add diagonal to the end if ( diag_prop ) { for ( int v = 0; v < num_aggregates * h_A.get_block_size(); v++ ) { vals.push_back(diag[v]); } } else { // Add a zero at the end for (int v = 0; v < h_A.get_block_size(); v++) { vals.push_back(types::util<typename Matrix_h::value_type>::get_zero()); } } h_Ac.resize(num_aggregates, num_aggregates, num_nnz, h_A.get_block_dimx(), h_A.get_block_dimy(), 1); h_Ac.row_indices = rows; h_Ac.col_indices = inds; h_Ac.values = vals; h_Ac.addProps( CSR | ( diag_prop ? DIAG : 0 ) ); h_Ac.computeDiagonal(); h_Ac.set_initialized(1); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define AMGX_CASE_LINE(CASE) template class LowDegCoarseAGenerator<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace aggregation } // namespace amgx
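// The coarse-matrix construction above is a two-pass CSR build: the first call
// to compute_sparsity_dispatch only counts the non-zeroes of every row of Ac,
// an exclusive scan turns those counts into row offsets, and the second call
// fills the reserved column-index slots. A minimal sketch of the offsets step,
// independent of the AMGX hash containers and with purely illustrative names,
// is shown below.
#include <thrust/device_vector.h>
#include <thrust/scan.h>

void csr_offsets_from_counts_sketch(thrust::device_vector<int> &row_counts)
{
    // row_counts[r] holds the non-zero count of row r after the counting pass.
    // Append one slot and scan in place to obtain the CSR row offsets.
    row_counts.push_back(0);
    thrust::exclusive_scan(row_counts.begin(), row_counts.end(), row_counts.begin());
    // row_counts is now the row-offsets array; its last element is the total
    // number of non-zeroes, which is what the code above reads back as
    // nonzero_blocks before launching the filling pass.
}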
93dc6db535920d6349d52128e22262cd45892914.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h> #include <thrust/system/detail/generic/reduce_by_key.h> #include <thrust/scan.h> #include <thrust/remove.h> #include <thrust/iterator/transform_iterator.h> #include <thrust_wrapper.h> #include <error.h> #include <cutil.h> #include <util.h> #include <types.h> #include <misc.h> #include <hash_workspace.h> #include <matrix_io.h> #include <device_properties.h> #include <amgx_types/util.h> namespace amgx { namespace aggregation { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <sm_utils.inl> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 #include <hash_containers_sm70.inl> // Included inside the namespace to solve name collisions. static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 ) { int offset = -1; if ( utils::lane_id() == 0 ) { offset = atomicAdd( queue, count ); } return utils::shfl( offset, 0 ); } #else #include <hash_containers_sm35.inl> // Included inside the namespace to solve name collisions. static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 ) { int offset = -1; if ( utils::lane_id() == 0 ) { offset = atomicAdd( queue, count ); } return utils::shfl( offset, 0 ); } #endif /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool COUNT_ONLY > __global__ __launch_bounds__( CTA_SIZE ) void compute_sparsity_kernel( const int R_num_rows, // same as num_aggregates. 
const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *aggregates, int *Ac_rows, int *Ac_cols, int *Ac_pos, const int gmem_size, int *g_keys, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // Make sure we have to proceed. if ( COUNT_ONLY ) { volatile int *status = reinterpret_cast<volatile int *>( wk_status ); if ( set.has_failed() || *status != 0 ) { return; } } // Clear the set. set.clear(); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Iterate over the columns of R. for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end; // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { int local_k = k + lane_id_div_num_threads; // Is it an active thread. bool is_active_k = local_k < num_rows; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_a_row_id = utils::shfl( a_row_id, local_k ); // Load the range of the row of B. int a_col_it = 0, a_col_end = 0; if ( is_active_k ) { a_col_it = A_rows[uniform_a_row_id + 0]; a_col_end = A_rows[uniform_a_row_id + 1]; } // Iterate over the range of columns of B. for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW ) { int a_col_id = -1, a_agg_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; a_agg_id = aggregates[a_col_id]; } if ( HAS_DIAG && a_agg_id == r_row_id ) { a_agg_id = -1; } set.insert( a_agg_id, COUNT_ONLY ? wk_status : NULL ); } } } // Store the results. 
if ( COUNT_ONLY ) { int count = set.compute_size(); if ( lane_id == 0 ) { Ac_rows[r_row_id] = count; } } else { int ac_col_it = Ac_rows[r_row_id]; set.store_with_positions( &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] ); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 8 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_1x1( const int R_num_rows, const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, int gmem_size, int *g_keys, Value_type *g_vals, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The hash values stored in shared memory. __shared__ Value_type s_vals[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_map<int, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id * SMEM_SIZE ], &g_keys[r_row_id * gmem_size ], &s_vals[warp_id * SMEM_SIZE], &g_vals[r_row_id * gmem_size ], gmem_size ); // Loop over rows of A. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the output row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. TODO: Make sure it's needed. I don't think it is!!!! map.clear(); // Populate the map. map.load( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] ); int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // The diagonal. Value_type r_diag(types::util<Value_type>::get_zero()); // _iterate over the columns of A to build C_hat. for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } // Update the diagonal (if needed). if ( HAS_DIAG && is_active ) { r_diag = r_diag + A_vals[A_diag[a_row_id]]; } const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { int local_k = k + lane_id_div_num_threads; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_a_row_id = utils::shfl( a_row_id, local_k ); // The range of the row of B. 
int a_col_it = 0, a_col_end = 0; if ( local_k < num_rows ) { a_col_it = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 0] ); a_col_end = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 1] ); } // Iterate over the range of columns of B. for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW ) { // Load columns and values. int a_col_id = -1; Value_type a_value(types::util<Value_type>::get_zero()); if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; a_value = A_vals[a_col_it]; } // Find the aggregate. int a_agg_id = -1; if ( a_col_it < a_col_end ) { a_agg_id = aggregates[a_col_id]; } // Update the diag/hash map. if ( HAS_DIAG && a_agg_id == r_row_id ) { r_diag = r_diag + a_value; a_agg_id = -1; } map.insert( a_agg_id, a_value, NULL ); // It won't insert. Only update. } } } // Update the diagonal. if ( HAS_DIAG ) { r_diag = utils::warp_reduce<1, utils::Add>( r_diag ); if ( lane_id == 0 ) { Ac_vals[Ac_diag[r_row_id]] = r_diag; } } // Store the results. int count = ac_col_end - ac_col_it; if ( count == 0 ) { continue; } map.store( count, &Ac_vals[ac_col_it] ); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG > #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __global__ __launch_bounds__( CTA_SIZE, 8 ) #else __global__ __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_4x4( const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, const int gmem_size, int *g_keys, int *g_idx, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // Constants. const int lane_id_div_16 = lane_id / 16; const int lane_id_mod_16 = lane_id % 16; const int warp_offset = 16 * lane_id_div_16; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // My index. Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] ); // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. set.clear(true); // Populate the index. set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false ); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Diagonal value (each half warp stores a diagonal element). Value_type ac_diag(types::util<Value_type>::get_zero()); // Iterate over the columns of R. for ( r_col_it += lane_id_div_16 ; utils::any(r_col_it < r_col_end) ; r_col_it += 2 ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end; // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. 
int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } // Update the diagonal if needed. if ( HAS_DIAG && is_active ) { ac_diag = ac_diag + A_vals[16 * A_diag[a_row_id] + lane_id_mod_16]; } // Load the range of the row of A. int a_col_begin = 0, a_col_end = 0; if ( is_active ) { a_col_begin = A_rows[a_row_id + 0]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the range of columns of B. for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += 16 ) { int a_col_it = a_col_begin + lane_id_mod_16; // Each thread loads a column-ID and an aggregate. int a_col_id = -1, ac_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols [a_col_it]; ac_col_id = aggregates[a_col_id]; } // Each thread uses the hashed index to find the position associated with the aggregate. int key = ac_col_id; if ( HAS_DIAG && ac_col_id == r_row_id ) { key = -1; } int ac_idx = ac_col_it + set.find_index( key, index, false ); // Iterate over the 16 items. for ( int k = 0 ; k < 16 ; ++k ) { int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k ); int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k ); // Early loop exit. if ( utils::all( uniform_ac_col == -1 ) ) { break; } // The index of the item. const int uniform_a_col_it = a_col_begin + k; // Load the value if possible. Value_type a_value(types::util<Value_type>::get_zero()); if ( uniform_a_col_it < a_col_end ) { a_value = A_vals[16 * uniform_a_col_it + lane_id_mod_16]; } // Proceed diagonal if needed. if ( HAS_DIAG && uniform_ac_col == r_row_id ) { ac_diag = ac_diag + a_value; uniform_ac_col = -1; } // Get the id of the column computed by the other half warp. int other_ac_col = utils::shfl_xor( uniform_ac_col, 16 ); // If both half warps want to write to the same location, we have a conflict!!! int are_fighting = uniform_ac_col == other_ac_col; // Reduce the two values to a single one. if ( uniform_ac_col != -1 && are_fighting ) { a_value = a_value + utils::shfl_xor( a_value, 16 ); } // If the two half warps fight, only one can be the winner... It's the 1st half!!! int is_winner = !are_fighting || lane_id_div_16 == 0; // Update the value. if ( uniform_ac_col != -1 && is_winner ) { Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] = Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] + a_value; } } } } if ( HAS_DIAG ) { ac_diag = ac_diag + utils::shfl_xor( ac_diag, 16 ); if ( lane_id_div_16 == 0 ) { Ac_vals[16 * Ac_diag[r_row_id] + lane_id_mod_16] = ac_diag; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM > #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __global__ __launch_bounds__( CTA_SIZE, 8 ) #else __global__ __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_NxN( const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, const int gmem_size, int *g_keys, int *g_idx, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int T_WARP = FORCE_DETERMINISM ? 1 : WARP_SIZE / NxN; const int NUM_ITEMS_PER_WARP = T_WARP == 0 ? 1 : T_WARP; // The hash keys stored in shared memory. 
__shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; const int warp_offset = NxN * lane_id_div_NxN; // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // My index. Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] ); // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. set.clear(true); // Populate the index. set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false ); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Diagonal value (each half warp stores a diagonal element). Value_type ac_diag(types::util<Value_type>::get_zero()); // Iterate over the columns of R. for ( r_col_it += lane_id_div_NxN ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP ) { // Is it an active thread. const bool is_active = r_col_it < r_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP; // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. int a_row_id = -1; if ( is_active ) { a_row_id = R_cols[r_col_it]; } // Update the diagonal if needed. if ( HAS_DIAG && is_active ) { ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN]; } // Load the range of the row of A. int a_col_begin = 0, a_col_end = 0; if ( is_active ) { a_col_begin = A_rows[a_row_id + 0]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the range of columns of B. for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN ) { int a_col_it = a_col_begin + lane_id_mod_NxN; // Is it active. const bool is_active_k = a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP; // Each thread loads a column-ID and an aggregate. int a_col_id = -1, ac_col_id = -1; if ( is_active_k ) { a_col_id = A_cols [a_col_it]; ac_col_id = aggregates[a_col_id]; } // Each thread uses the hashed index to find the position associated with the aggregate. int key = ac_col_id; if ( HAS_DIAG && ac_col_id == r_row_id ) { key = -1; } int ac_idx = ac_col_it + set.find_index( key, index, false ); // Iterate over the NxN items. for ( int k = 0 ; k < NxN ; ++k ) { int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k ); int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k ); if ( lane_id_div_NxN >= NUM_ITEMS_PER_WARP ) { uniform_ac_col = -1; uniform_ac_idx = -1; } // Early loop exit. if ( utils::all( uniform_ac_col == -1 ) ) { break; } // The index of the item. const int uniform_a_col_it = a_col_begin + k; // Load the value if possible. Value_type a_value(types::util<Value_type>::get_zero()); if ( uniform_a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP ) { a_value = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } // Update the diagonal if it is a diagonal term. if ( HAS_DIAG && uniform_ac_col == r_row_id ) { ac_diag = ac_diag + a_value; uniform_ac_col = -1; } // Update the value. 
if ( uniform_ac_col != -1 ) { utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + lane_id_mod_NxN], a_value ); } } } } if ( HAS_DIAG ) { if ( !FORCE_DETERMINISM ) { ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag ); } if ( lane_id_div_NxN == 0 ) { Ac_vals[NxN * Ac_diag[r_row_id] + lane_id_mod_NxN] = ac_diag; } } } } // when blocksize is larger than warp size template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM, int NUM_BLOCK_ITERS_PER_WARP> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __global__ __launch_bounds__( CTA_SIZE, 8 ) #else __global__ __launch_bounds__( CTA_SIZE, 8 ) #endif void fill_A_kernel_NxN_large( const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, const int gmem_size, int *g_keys, int *g_idx, int *wk_work_queue ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. Let's be chill here and take 1 per warp for large blocks const int NUM_ITEMS_PER_WARP = 1; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // First threads load the row IDs of A needed by the CTA... int r_row_id = blockIdx.x * NUM_WARPS + warp_id; // My index. Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] ); // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size ); // Loop over rows of R. for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) ) { // The indices of the row. int ac_col_it = Ac_rows[r_row_id + 0]; int ac_col_end = Ac_rows[r_row_id + 1]; // Clear the set first. set.clear(true); // Populate the index. set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false ); // Load the range of the row. int r_col_it = R_rows[r_row_id + 0]; int r_col_end = R_rows[r_row_id + 1]; // Diagonal value (each half warp stores a diagonal element). Value_type ac_diag(types::util<Value_type>::get_zero()); // Iterate over the columns of R. for ( ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP ) { // Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID. int a_row_id = R_cols[r_col_it]; // Update the diagonal if needed. if ( HAS_DIAG ) { ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id]; } // Load the range of the row of A. int a_col_begin = A_rows[a_row_id + 0]; int a_col_end = A_rows[a_row_id + 1]; // Iterate over the range of columns of B. for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN ) { int a_col_it = a_col_begin + lane_id; // Is it active. const bool is_active_k = a_col_it < a_col_end; // Each thread loads a column-ID and an aggregate. int a_col_id = -1, ac_col_id = -1; if ( is_active_k ) { a_col_id = A_cols [a_col_it]; ac_col_id = aggregates[a_col_id]; } // Each thread uses the hashed index to find the position associated with the aggregate. int key = ac_col_id; if ( HAS_DIAG && ac_col_id == r_row_id ) { key = -1; } int ac_idx = ac_col_it + set.find_index( key, index, false ); // Iterate over the NxN items. 
for ( int k = 0 ; k < NxN ; ++k ) { int uniform_ac_col = utils::shfl( ac_col_id, k ); int uniform_ac_idx = utils::shfl( ac_idx, k ); // Early loop exit. if ( utils::all( uniform_ac_col == -1 ) ) { break; } // The index of the item. const int uniform_a_col_it = a_col_begin + k; // iterate through the block #pragma unroll for (int i = 0; i < NUM_BLOCK_ITERS_PER_WARP; i++) { // Load the value if possible. Value_type a_value(types::util<Value_type>::get_zero()); if ( uniform_a_col_it < a_col_end && (WARP_SIZE * i + lane_id) < NxN ) { a_value = A_vals[NxN * uniform_a_col_it + WARP_SIZE * i + lane_id]; } // Update the diagonal if it is a diagonal term. if ( HAS_DIAG && uniform_ac_col == r_row_id ) { ac_diag = ac_diag + a_value; uniform_ac_col = -1; } // Update the value. if ( uniform_ac_col != -1 && (WARP_SIZE * i + lane_id) < NxN) { utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + WARP_SIZE * i + lane_id], a_value ); } } } } } if ( HAS_DIAG ) { if ( !FORCE_DETERMINISM ) { ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag ); } Ac_vals[NxN * Ac_diag[r_row_id] + lane_id] = ac_diag; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// enum { WARP_SIZE = 32, SMEM_SIZE = 128 }; template< int CTA_SIZE, bool HAS_DIAG, bool COUNT_ONLY, typename Workspace > static void compute_sparsity_dispatch( Workspace &hash_wk, const int R_num_rows, const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *aggregates, int *Ac_rows, int *Ac_cols, int *Ac_pos ) { cudaDeviceProp props = getDeviceProperties(); int GRID_SIZE = (props.major >= 7) ? 1024 : 128; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; int *h_status; thrust::global_thread_handle::cudaMallocHost((void **) &h_status, sizeof(int)); int *h_work_offset; thrust::global_thread_handle::cudaMallocHost((void **) &h_work_offset, sizeof(int)); int attempt = 0; bool warning_printed = 0; for ( bool done = false ; !done && attempt < 10 ; ++attempt ) { // Double the amount of GMEM (if needed). if ( attempt > 0 ) { if (!warning_printed) { amgx_printf("WARNING: Used settings might result in degraded performance for the MG coarsener for this matrix.\n"); amgx_printf("WARNING: You might want to try different selector or MG algorithm for better performance.\n"); warning_printed = 1; } hash_wk.expand(); } // Reset the status. int *p_status = h_status; *p_status = 0; cudaMemcpyAsync( hash_wk.get_status(), p_status, sizeof(int), cudaMemcpyHostToDevice, thrust::global_thread_handle::get_stream() ); cudaCheckError(); // Reset the work queue. int *p_work_offset = h_work_offset; *p_work_offset = GRID_SIZE * NUM_WARPS; cudaMemcpyAsync( hash_wk.get_work_queue(), p_work_offset, sizeof(int), cudaMemcpyHostToDevice, thrust::global_thread_handle::get_stream() ); cudaCheckError(); // Launch the kernel. compute_sparsity_kernel<8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, HAS_DIAG, COUNT_ONLY> <<< GRID_SIZE, CTA_SIZE, 0, thrust::global_thread_handle::get_stream()>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, aggregates, Ac_rows, Ac_cols, Ac_pos, hash_wk.get_gmem_size(), hash_wk.get_keys(), hash_wk.get_work_queue(), hash_wk.get_status() ); cudaCheckError(); // Read the result from count_non_zeroes. 
cudaMemcpyAsync( p_status, hash_wk.get_status(), sizeof(int), cudaMemcpyDeviceToHost, thrust::global_thread_handle::get_stream() ); cudaStreamSynchronize(thrust::global_thread_handle::get_stream()); done = (*p_status == 0); cudaCheckError(); } thrust::global_thread_handle::cudaFreeHost(h_status); thrust::global_thread_handle::cudaFreeHost(h_work_offset); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int CTA_SIZE, bool HAS_DIAG, typename Workspace, typename Value_type > static void fill_A_dispatch( Workspace &hash_wk, const int block_size, const int R_num_rows, // same as num_aggregates. const int *R_rows, const int *R_cols, const int *A_rows, const int *A_cols, const int *A_diag, const Value_type *A_vals, const int *aggregates, const int *Ac_rows, const int *Ac_cols, const int *Ac_pos, const int *Ac_diag, Value_type *Ac_vals, bool force_determinism ) { cudaDeviceProp props = getDeviceProperties(); int GRID_SIZE = (props.major >= 7) ? 1024 : 128; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; int work_offset = GRID_SIZE * NUM_WARPS; cudaMemcpyAsync( hash_wk.get_work_queue(), &work_offset, sizeof(int), cudaMemcpyHostToDevice, thrust::global_thread_handle::get_stream() ); cudaCheckError(); // Launch the kernel. switch ( block_size ) { case 1: fill_A_kernel_1x1<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG> <<< GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), hash_wk.get_vals(), hash_wk.get_work_queue() ); break; case 2: fill_A_kernel_NxN<Value_type, 2, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false> <<< GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 3: fill_A_kernel_NxN<Value_type, 3, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false> <<< GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 4: if ( force_determinism ) fill_A_kernel_NxN<Value_type, 4, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, true> <<< GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); else fill_A_kernel_4x4<Value_type, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG> <<< GRID_SIZE, CTA_SIZE, 0, thrust::global_thread_handle::get_stream()>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 5: fill_A_kernel_NxN<Value_type, 5, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false> <<< GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 8: fill_A_kernel_NxN_large<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, 
HAS_DIAG, false, 2> <<< GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; case 10: fill_A_kernel_NxN_large<Value_type, 10, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false, 4> <<< GRID_SIZE, CTA_SIZE>>>( R_num_rows, R_rows, R_cols, A_rows, A_cols, A_diag, A_vals, aggregates, Ac_rows, Ac_cols, Ac_pos, Ac_diag, Ac_vals, hash_wk.get_gmem_size(), hash_wk.get_keys(), reinterpret_cast<int *>( hash_wk.get_vals() ), hash_wk.get_work_queue() ); break; default: FatalError( "LOW_DEG not implemented for this block size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void LowDegCoarseAGenerator<TemplateConfig<AMGX_device, V, M, I> >::computeAOperator( const Matrix_d &A, Matrix_d &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates ) { if ( A.get_block_dimx() != A.get_block_dimy() ) { FatalError( "LowDegCoarseAGenerator implemented for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } // The matrix Ac will be modified. Ac.set_initialized(0); // Is the diagonal stored separately?? const int diag_prop = A.hasProps(DIAG); // Allocate a workspace for hashing. typedef TemplateConfig<AMGX_device, V, M, I> TConfig_d; cudaDeviceProp props = getDeviceProperties(); int grid_size = (props.major >= 7) ? 1024 : 128; Hash_Workspace<TConfig_d, int> hash_wk(true, grid_size); // Compute row offsets of Ac. Ac.addProps(CSR); Ac.set_num_rows( num_aggregates ); Ac.set_num_cols( num_aggregates ); Ac.row_offsets.resize( num_aggregates + 1 ); // Compute the number of non-zero elements per row of Ac. const int CTA_SIZE = 128; if ( diag_prop ) compute_sparsity_dispatch<CTA_SIZE, true, true>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), NULL, NULL ); else compute_sparsity_dispatch<CTA_SIZE, false, true>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), NULL, NULL ); cudaCheckError(); // Compute the number of non-zeroes. thrust_wrapper::exclusive_scan( Ac.row_offsets.begin(), Ac.row_offsets.end(), Ac.row_offsets.begin() ); cudaCheckError(); int nonzero_blocks = Ac.row_offsets[num_aggregates]; if ( diag_prop ) { Ac.addProps(DIAG); } if ( A.is_matrix_singleGPU() ) { Ac.resize( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), !diag_prop ); } else { //have 3% more nz for storage Ac.resize_spare( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), 1.0 ); if ( diag_prop ) { Ac.computeDiagonal(); } } // Vector to store the positions in the hash table. device_vector_alloc<int> Ac_pos(nonzero_blocks); // Compute the sparsity pattern of the rows of Ac. 
if ( diag_prop ) compute_sparsity_dispatch<CTA_SIZE, true, false>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() )); else compute_sparsity_dispatch<CTA_SIZE, false, false>( hash_wk, num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() )); cudaCheckError(); // Reset values if needed. if ( A.get_block_dimy() != 1 ) { thrust::fill( Ac.values.begin(), Ac.values.end(), types::util<ValueType>::get_zero() ); cudaCheckError(); } // Compute values. if ( diag_prop ) { fill_A_dispatch<CTA_SIZE, true>( hash_wk, A.get_block_dimy(), num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() ), Ac.diag.raw(), Ac.values.raw(), this->m_force_determinism ); } else { fill_A_dispatch<CTA_SIZE, false>( hash_wk, A.get_block_dimy(), num_aggregates, R_row_offsets.raw(), R_column_indices.raw(), A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), aggregates.raw(), Ac.row_offsets.raw(), Ac.col_indices.raw(), thrust::raw_pointer_cast( &Ac_pos.front() ), Ac.diag.raw(), Ac.values.raw(), this->m_force_determinism ); } cudaCheckError(); // Update the diagonal if needed. if ( Ac.is_matrix_singleGPU() ) { Ac.computeDiagonal(); } cudaCheckError(); // Finalize the modification. Ac.set_initialized(1); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void LowDegCoarseAGenerator<TemplateConfig<AMGX_host, V, M, I> >::computeAOperator( const Matrix_h &h_A, Matrix_h &h_Ac, const IVector &h_aggregates, const IVector &h_R_row_offsets, const IVector &h_R_column_indices, const int num_aggregates ) { h_Ac.set_initialized(0); IVector rows; IVector inds; typename Matrix_h::MVector vals; typename Matrix_h::MVector diag; int num_nnz = 0; int diag_prop = h_A.hasProps(DIAG); for ( int row = 0; row < num_aggregates; row++ ) { for ( int col = 0; col < num_aggregates; col++ ) { int fill = 0; typename Matrix_h::MVector cur(h_A.get_block_size(), types::util<typename Matrix_h::value_type>::get_zero()); for ( int rc = h_R_row_offsets[row]; rc < h_R_row_offsets[row + 1]; rc++ ) { int j = h_R_column_indices[rc]; for ( int ac = h_A.row_offsets[j]; ac < h_A.row_offsets[j + 1] + diag_prop; ac++ ) { int k = (ac == h_A.row_offsets[j + 1]) ? j : h_A.col_indices[ac]; for ( int q = h_R_row_offsets[col]; q < h_R_row_offsets[col + 1]; q++ ) if ( k == h_R_column_indices[q] ) { fill = 1; int val_idx = (ac == h_A.row_offsets[j + 1]) ? 
h_A.get_num_nz() + j : ac; for ( int v = 0; v < h_A.get_block_size(); v++) { cur[v] = cur[v] + h_A.values[val_idx * h_A.get_block_size() + v]; } } } } if ( fill ) { if ( row != col || !diag_prop ) { inds.push_back(col); rows.push_back(row); num_nnz++; for ( int v = 0; v < h_A.get_block_size(); v++ ) { vals.push_back(cur[v]); } } else { for ( int v = 0; v < h_A.get_block_size(); v++ ) { diag.push_back(cur[v]); } } } } } rows.push_back(-1); // add diagonal to the end if ( diag_prop ) { for ( int v = 0; v < num_aggregates * h_A.get_block_size(); v++ ) { vals.push_back(diag[v]); } } else { // Add a zero at the end for (int v = 0; v < h_A.get_block_size(); v++) { vals.push_back(types::util<typename Matrix_h::value_type>::get_zero()); } } h_Ac.resize(num_aggregates, num_aggregates, num_nnz, h_A.get_block_dimx(), h_A.get_block_dimy(), 1); h_Ac.row_indices = rows; h_Ac.col_indices = inds; h_Ac.values = vals; h_Ac.addProps( CSR | ( diag_prop ? DIAG : 0 ) ); h_Ac.computeDiagonal(); h_Ac.set_initialized(1); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define AMGX_CASE_LINE(CASE) template class LowDegCoarseAGenerator<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace aggregation } // namespace amgx
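// --------------------------------------------------------------------------
// Editorial aside (hypothetical, not AMGX code): the device computeAOperator
// above follows the classic two-pass CSR construction -- a COUNT_ONLY pass
// that writes the per-row non-zero count into Ac.row_offsets, an exclusive
// scan that turns those counts into offsets, and a second pass that fills
// col_indices/values at the scanned positions. The standalone CUDA sketch
// below shows the same pattern on a much simpler problem (dropping small
// entries from a CSR matrix). count_kernel, fill_kernel and the demo data
// are illustrative only and are meant to be compiled as a separate program;
// they do not mirror compute_sparsity_dispatch or its hash workspace.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/scan.h>

__global__ void count_kernel( int n, const int *ro, const double *va, double tol, int *out_counts )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n ) { return; }
    int count = 0;
    for ( int k = ro[row] ; k < ro[row + 1] ; ++k )
        if ( fabs( va[k] ) > tol ) { ++count; }
    out_counts[row] = count; // pass 1: counts only, no column/value writes
}

__global__ void fill_kernel( int n, const int *ro, const int *ci, const double *va, double tol,
                             const int *out_ro, int *out_ci, double *out_va )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n ) { return; }
    int dst = out_ro[row]; // start of this row in the output, from the scan
    for ( int k = ro[row] ; k < ro[row + 1] ; ++k )
        if ( fabs( va[k] ) > tol ) { out_ci[dst] = ci[k]; out_va[dst] = va[k]; ++dst; }
}

int main()
{
    // 3x3 CSR matrix with two tiny entries that the filter removes.
    int h_ro[] = { 0, 3, 5, 7 };
    int h_ci[] = { 0, 1, 2, 0, 1, 1, 2 };
    double h_va[] = { 4.0, 1e-9, -1.0, 1e-9, 3.0, -1.0, 2.0 };
    int n = 3, nnz = 7;
    thrust::device_vector<int> ro( h_ro, h_ro + n + 1 ), ci( h_ci, h_ci + nnz );
    thrust::device_vector<double> va( h_va, h_va + nnz );

    // Pass 1: per-row survivor counts land in out_ro[0..n-1]; one block is
    // enough for this tiny demo.
    thrust::device_vector<int> out_ro( n + 1, 0 );
    count_kernel<<<1, 128>>>( n, ro.data().get(), va.data().get(), 1e-6, out_ro.data().get() );

    // Exclusive scan turns counts into row offsets; out_ro[n] is the total nnz.
    thrust::exclusive_scan( out_ro.begin(), out_ro.end(), out_ro.begin() );
    int out_nnz = out_ro[n];

    // Pass 2: fill column indices and values at the scanned offsets.
    thrust::device_vector<int> out_ci( out_nnz );
    thrust::device_vector<double> out_va( out_nnz );
    fill_kernel<<<1, 128>>>( n, ro.data().get(), ci.data().get(), va.data().get(), 1e-6,
                             out_ro.data().get(), out_ci.data().get(), out_va.data().get() );
    cudaDeviceSynchronize();

    std::printf( "kept %d of %d entries\n", out_nnz, nnz ); // expected: kept 5 of 7 entries
    return 0;
}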
c149689c227c32449077e27f99bec44d96051186.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star3d1r-32x32-7-128_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 18; const AN5D_TYPE __side3Len = 18; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= 
(__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6) && __local_c3 >= (__halo3 * 6) && __local_c3 < __side3LenOl - (__halo3 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7) && __local_c3 >= (__halo3 * 7) && __local_c3 < __side3LenOl - (__halo3 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, 
__reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, 
__reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, 
__h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - 
(__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6) && __local_c3 >= (__halo3 * 6) && __local_c3 < __side3LenOl - (__halo3 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, 
__reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); 
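/* Warm-up for thread blocks that are not first along c1: the overlapped
   halo planes are reloaded and pushed through the 6-stage register
   pipeline; no result is stored until every stage holds valid data. */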
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); 
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == 
__side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 22; const AN5D_TYPE __side3Len = 22; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE 
__writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, 
__reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, 
__reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 24; const AN5D_TYPE __side3Len = 24; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - 
(__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); 
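/* First block along c1 (boundary slab): the raw plane 0 is reused to seed
   the later pipeline stages, so the 4-stage pipeline can begin storing
   rows while it is still being filled. */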
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, 
__reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = 
blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
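/* Last block along c1, two input planes left: load them and drain the
   3-stage pipeline, storing the remaining rows up to __h. */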
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 28; const AN5D_TYPE __side3Len = 28; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad 
- __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { 
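/* kernel0_1: one time step per sweep (__side0Len = 1).  Each thread block
   streams a 128-plane slab along c1, keeping three consecutive c1-planes in
   registers and the middle plane in shared memory for the c2/c3 neighbours,
   and stores one updated plane per __h iteration.  The deeper variants above
   (kernel0_2, kernel0_3, ...) fuse several time steps in the same streaming
   pattern; this one presumably covers whatever time steps the host driver
   has left over. */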
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0);
  const AN5D_TYPE __c0Pad = (0);
  #define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
  const AN5D_TYPE __c1Pad = (1);
  #define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
  const AN5D_TYPE __c2Pad = (1);
  #define __c2 c2
  const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
  const AN5D_TYPE __c3Pad = (1);
  #define __c3 c3
  const AN5D_TYPE __halo1 = 1;
  const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __halo3 = 1;
  const AN5D_TYPE __side0Len = 1;
  const AN5D_TYPE __side1Len = 128;
  const AN5D_TYPE __side2Len = 30;
  const AN5D_TYPE __side3Len = 30;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
  const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
  const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
  float __reg_0_0;
  float __reg_0_1;
  float __reg_0_2;
  __shared__ float __b_sb_double[__blockSize * 2];
  float *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
  const AN5D_TYPE __storeValid = __writeValid1;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
  #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
  #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
  #define __REGREF(reg, i2, i3) reg
  #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
  #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
  #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
  #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
  #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0)
  {
    __LOAD(__reg_0_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2);
    __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
  }
  else
  {
    __LOAD(__reg_0_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2);
    __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
  }
  __b_sb = __b_sb_double + __blockSize * 1;
  if (__c1Id == __side1Num - 1)
  {
    for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
    {
      __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++;
      __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++;
      __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++;
      __DB_SWITCH(); __syncthreads();
    }
    if (0) {}
    else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
    {
    }
    else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
    {
      __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
    }
    else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
    {
      __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
      __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
    }
  }
  else
  {
    for (__h = 3; __h <= __side1LenOl - 3;)
    {
      __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++;
      __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++;
      __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++;
      __DB_SWITCH(); __syncthreads();
    }
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++;
  }
}
c149689c227c32449077e27f99bec44d96051186.cu
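// CUDA counterpart of the HIP source above: the same AN5D-generated kernels for a 3D
// 7-point star stencil of radius 1 (see star3d1r-32x32-7-128_kernel.hu). kernel0_N fuses
// N consecutive time steps per sweep (kernel0_7 down to kernel0_1): each stage k keeps a
// rotating three-plane register window (__reg_k_0/1/2) along c1 plus a double-buffered
// shared-memory plane (__b_sb_double) for the c2/c3 neighbours, and is predicated by
// __writeValidk so that only threads whose halo is still valid at that depth contribute.
// The update coefficients are fixed in __CALCEXPR (0.2500 for the centre, 0.1248-0.1253
// for the six neighbours), and A is double-buffered over time via the (c0 % 2) and
// ((c0 + 1) % 2) indices used by __LOAD and __DEST. Deeper fusion shrinks the per-block
// output tile (18x18 for kernel0_7 up to 30x30 for kernel0_1) while __side1Len stays at 128.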
#include "star3d1r-32x32-7-128_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 18; const AN5D_TYPE __side3Len = 18; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && 
__local_c3 < __side3LenOl - (__halo3 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6) && __local_c3 >= (__halo3 * 6) && __local_c3 < __side3LenOl - (__halo3 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7) && __local_c3 >= (__halo3 * 7) && __local_c3 < __side3LenOl - (__halo3 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, 
__reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); 
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, 
__reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, 
__reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE 
__writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6) && __local_c3 >= (__halo3 * 6) && __local_c3 < __side3LenOl - (__halo3 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, 
__reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, 
__reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 22; const AN5D_TYPE __side3Len = 22; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 
3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); 
__LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { 
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h 
== __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 24; const AN5D_TYPE __side3Len = 24; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid 
&& __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; 
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 28; const AN5D_TYPE __side3Len = 28; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad 
- __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { 
#ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 1 - 1); const AN5D_TYPE __c3Pad = (1); #define __c3 c3 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __halo3 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 30; const AN5D_TYPE __side3Len = 30; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
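Every kernel0_N above evaluates the same single-precision 7-point 3D stencil on a double-buffered array A of shape [2][dimsize][dimsize][dimsize]; the register pipelines, shared-memory double buffer and __writeValidN guards only exist so that N time steps can be fused into one launch. For orientation, a naive one-step CUDA kernel computing the same update is sketched below. It is an illustration inferred from the __CALCEXPR, __LOAD and __DEST macros, not part of the generated file, and the kernel name is hypothetical.

// Unoptimized reference for the update performed by the generated kernels above.
// Only interior points are written, matching __c1Len/__c2Len/__c3Len = dimsize - 2.
__global__ void heat3d_naive_step(float *A, int dimsize, int t)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;   // c3 (fastest-varying)
  int y = blockIdx.y * blockDim.y + threadIdx.y;   // c2
  int z = blockIdx.z * blockDim.z + threadIdx.z;   // c1
  if (x < 1 || x >= dimsize - 1 || y < 1 || y >= dimsize - 1 || z < 1 || z >= dimsize - 1)
    return;
  const size_t plane = (size_t)dimsize * dimsize;
  const size_t cube  = plane * dimsize;
  const float *in  = A + (size_t)(t % 2) * cube;        // source buffer, as in __LOAD
  float       *out = A + (size_t)((t + 1) % 2) * cube;  // destination buffer, as in __DEST
  const size_t c = (size_t)z * plane + (size_t)y * dimsize + x;
  out[c] = 0.2500f * in[c]
         + 0.1248f * in[c - plane]   + 0.1249f * in[c + plane]     // c1 - 1, c1 + 1
         + 0.1250f * in[c - dimsize] + 0.1251f * in[c + dimsize]   // c2 - 1, c2 + 1
         + 0.1252f * in[c - 1]       + 0.1253f * in[c + 1];        // c3 - 1, c3 + 1
}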
cdb4f2302fa934c2746a6eca52580b529eac75a0.hip
// !!! This is a file automatically generated by hipify!!!
#include "deform.cuh"
#include <npp.h>

deform::deform(size_t ntheta, size_t nz, size_t n, size_t ptheta, size_t ngpus)
  : ntheta(ntheta), nz(nz), n(n), ptheta(ptheta), ngpus(ngpus)
{
  cstreams = new hipStream_t[ptheta*ngpus];
  nstreams = new NppStreamContext[ptheta*ngpus];
  for (int igpu=0;igpu<ngpus;igpu++)
  {
    hipSetDevice(igpu);
    for (int i=0;i<ptheta; i++)
    {
      hipStreamCreate(&cstreams[igpu*ptheta+i]);
      nstreams[igpu*ptheta+i].hStream=cstreams[igpu*ptheta+i];
    }
  }
  hipSetDevice(0);
}

// destructor, memory deallocation
deform::~deform()
{
  free();
}

void deform::free()
{
  if (!is_free)
  {
    for (int igpu=0;igpu<ngpus;igpu++)
    {
      hipSetDevice(igpu);
      for (int i=0;i<ptheta;i++)
      {
        hipStreamDestroy(cstreams[igpu*ptheta+i]);
      }
    }
    delete[] cstreams;
    delete[] nstreams;
    is_free = true;
    hipSetDevice(0);
  }
}

void deform::remap(size_t g, size_t f, size_t flowx, size_t flowy, size_t gpu)
{
  Npp32f *pSrc = (Npp32f *)f;
  NppiSize oSize = {(int)n,(int)nz};
  Npp32f *pDst = (Npp32f *)g;
  NppiRect oROI = {0,0,(int)n,(int)nz};
  int nStep = 4*n;
  Npp32f *pXMap = (Npp32f *)flowx;
  Npp32f *pYMap = (Npp32f *)flowy;
  int nXMapStep = 4*n;
  int nYMapStep = 4*n;
  hipSetDevice(gpu);
  for (int i=0;i<ptheta;i++)
  {
    nppiRemap_32f_C1R_Ctx(&pSrc[i*n*nz],oSize,nStep, oROI,
      &pXMap[i*n*nz], nXMapStep, &pYMap[i*n*nz], nYMapStep,
      &pDst[i*n*nz], nStep, oSize, NPPI_INTER_LANCZOS,//NPPI_INTER_LANCZOS,//NPPI_INTER_LANCZOS3_ADVANCED,NPPI_INTER_CUBIC
      nstreams[gpu*ptheta+i]);
    //nppiRemap_32f_C1R (const Npp32f *pSrc, NppiSize oSrcSize, int nSrcStep, NppiRect oSrcROI, const Npp32f *pXMap, int nXMapStep, const Npp32f *pYMap, int nYMapStep, Npp32f *pDst, int nDstStep, NppiSize oDstSizeROI, int eInterpolation)
  }
  hipDeviceSynchronize();
}
cdb4f2302fa934c2746a6eca52580b529eac75a0.cu
#include "deform.cuh"
#include <npp.h>

deform::deform(size_t ntheta, size_t nz, size_t n, size_t ptheta, size_t ngpus)
  : ntheta(ntheta), nz(nz), n(n), ptheta(ptheta), ngpus(ngpus)
{
  cstreams = new cudaStream_t[ptheta*ngpus];
  nstreams = new NppStreamContext[ptheta*ngpus];
  for (int igpu=0;igpu<ngpus;igpu++)
  {
    cudaSetDevice(igpu);
    for (int i=0;i<ptheta; i++)
    {
      cudaStreamCreate(&cstreams[igpu*ptheta+i]);
      nstreams[igpu*ptheta+i].hStream=cstreams[igpu*ptheta+i];
    }
  }
  cudaSetDevice(0);
}

// destructor, memory deallocation
deform::~deform()
{
  free();
}

void deform::free()
{
  if (!is_free)
  {
    for (int igpu=0;igpu<ngpus;igpu++)
    {
      cudaSetDevice(igpu);
      for (int i=0;i<ptheta;i++)
      {
        cudaStreamDestroy(cstreams[igpu*ptheta+i]);
      }
    }
    delete[] cstreams;
    delete[] nstreams;
    is_free = true;
    cudaSetDevice(0);
  }
}

void deform::remap(size_t g, size_t f, size_t flowx, size_t flowy, size_t gpu)
{
  Npp32f *pSrc = (Npp32f *)f;
  NppiSize oSize = {(int)n,(int)nz};
  Npp32f *pDst = (Npp32f *)g;
  NppiRect oROI = {0,0,(int)n,(int)nz};
  int nStep = 4*n;
  Npp32f *pXMap = (Npp32f *)flowx;
  Npp32f *pYMap = (Npp32f *)flowy;
  int nXMapStep = 4*n;
  int nYMapStep = 4*n;
  cudaSetDevice(gpu);
  for (int i=0;i<ptheta;i++)
  {
    nppiRemap_32f_C1R_Ctx(&pSrc[i*n*nz],oSize,nStep, oROI,
      &pXMap[i*n*nz], nXMapStep, &pYMap[i*n*nz], nYMapStep,
      &pDst[i*n*nz], nStep, oSize, NPPI_INTER_LANCZOS,//NPPI_INTER_LANCZOS,//NPPI_INTER_LANCZOS3_ADVANCED,NPPI_INTER_CUBIC
      nstreams[gpu*ptheta+i]);
    //nppiRemap_32f_C1R (const Npp32f *pSrc, NppiSize oSrcSize, int nSrcStep, NppiRect oSrcROI, const Npp32f *pXMap, int nXMapStep, const Npp32f *pYMap, int nYMapStep, Npp32f *pDst, int nDstStep, NppiSize oDstSizeROI, int eInterpolation)
  }
  cudaDeviceSynchronize();
}
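The deform class above is driven entirely through raw device pointers passed as size_t. A minimal host-side usage sketch follows; the sizes and buffer names are hypothetical, and it assumes deform.cuh declares the cstreams/nstreams/is_free members used above. Note that flowx/flowy hold absolute sampling coordinates, since nppiRemap looks up source positions from the maps rather than adding displacements.

#include "deform.cuh"
#include <cuda_runtime.h>

int main()
{
  const size_t ntheta = 128, nz = 64, n = 256, ptheta = 8, ngpus = 1;
  const size_t nbytes = ptheta * nz * n * sizeof(float);   // one chunk of ptheta slices
  float *f, *g, *flowx, *flowy;                            // hypothetical device buffers
  cudaMalloc(&f, nbytes);      // source slices, n x nz each
  cudaMalloc(&g, nbytes);      // remapped output
  cudaMalloc(&flowx, nbytes);  // x coordinate to sample from, per output pixel
  cudaMalloc(&flowy, nbytes);  // y coordinate to sample from, per output pixel
  // ... fill f, flowx, flowy on the device ...
  deform d(ntheta, nz, n, ptheta, ngpus);
  d.remap((size_t)g, (size_t)f, (size_t)flowx, (size_t)flowy, /*gpu=*/0);
  d.free();
  cudaFree(f); cudaFree(g); cudaFree(flowx); cudaFree(flowy);
  return 0;
}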
14d14e30a1abadc9c053bbb3d24090da6bbe4f1a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdint.h> #include <stdlib.h> #include <mutex> #include <string> #include <unordered_map> #include "nvtx.hpp" namespace ML { /** * @brief An internal struct to store associated state with the color * generator */ struct ColorGenState { /** collection of all tagged colors generated so far */ static std::unordered_map<std::string, uint32_t> allColors; /** mutex for accessing the above map */ static std::mutex mapMutex; /** saturation */ static constexpr float S = 0.9f; /** value */ static constexpr float V = 0.85f; /** golden ratio */ static constexpr float Phi = 1.61803f; /** inverse golden ratio */ static constexpr float InvPhi = 1.f / Phi; }; std::unordered_map<std::string, uint32_t> ColorGenState::allColors; std::mutex ColorGenState::mapMutex; // all h, s, v are in range [0, 1] // Ref: http://en.wikipedia.org/wiki/HSL_and_HSV#Converting_to_RGB uint32_t hsv2rgb(float h, float s, float v) { uint32_t out = 0xff000000u; if (s <= 0.0f) { return out; } // convert hue from [0, 1] range to [0, 360] float h_deg = h * 360.f; if (0.f > h_deg || h_deg >= 360.f) h_deg = 0.f; h_deg /= 60.f; int h_range = (int)h_deg; float h_mod = h_deg - h_range; float x = v * (1.f - s); float y = v * (1.f - (s * h_mod)); float z = v * (1.f - (s * (1.f - h_mod))); float r, g, b; switch (h_range) { case 0: r = v; g = z; b = x; break; case 1: r = y; g = v; b = x; break; case 2: r = x; g = v; b = z; break; case 3: r = x; g = y; b = v; break; case 4: r = z; g = x; b = v; break; case 5: default: r = v; g = x; b = y; break; } out |= (uint32_t(r * 256.f) << 16); out |= (uint32_t(g * 256.f) << 8); out |= uint32_t(b * 256.f); return out; } /** * @brief Helper method to generate 'visually distinct' colors. * Inspired from https://martin.ankerl.com/2009/12/09/how-to-create-random-colors-programmatically/ * However, if an associated tag is passed, it will look up in its history for * any generated color against this tag and if found, just returns it, else * generates a new color, assigns a tag to it and stores it for future usage. * Such a thing is very useful for nvtx markers where the ranges associated * with a specific tag should ideally get the same color for the purpose of * visualizing it on nsight-systems timeline. 
* @param tag look for any previously generated colors with this tag or * associate the currently generated color with it * @return returns 32b RGB integer with alpha channel set of 0xff */ uint32_t generateNextColor(const std::string &tag) { std::lock_guard<std::mutex> guard(ColorGenState::mapMutex); if (!tag.empty()) { auto itr = ColorGenState::allColors.find(tag); if (itr != ColorGenState::allColors.end()) { return itr->second; } } float h = rand() * 1.f / RAND_MAX; h += ColorGenState::InvPhi; if (h >= 1.f) h -= 1.f; auto rgb = hsv2rgb(h, ColorGenState::S, ColorGenState::V); if (!tag.empty()) { ColorGenState::allColors[tag] = rgb; } return rgb; } #ifdef NVTX_ENABLED #include <roctracer/roctx.h> nvtxDomainHandle_t domain = nvtxDomainCreateA("cuml_cpp"); void PUSH_RANGE(const char *name, hipStream_t stream) { CUDA_CHECK(hipStreamSynchronize(stream)); PUSH_RANGE(name); } void POP_RANGE(hipStream_t stream) { CUDA_CHECK(hipStreamSynchronize(stream)); POP_RANGE(); } void PUSH_RANGE(const char *name) { nvtxEventAttributes_t eventAttrib = {0}; eventAttrib.version = NVTX_VERSION; eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; eventAttrib.colorType = NVTX_COLOR_ARGB; eventAttrib.color = generateNextColor(name); eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; eventAttrib.message.ascii = name; nvtxDomainRangePushEx(domain, &eventAttrib); } void POP_RANGE() { nvtxDomainRangePop(domain); } #else // NVTX_ENABLED void PUSH_RANGE(const char *name, hipStream_t stream) {} void POP_RANGE(hipStream_t stream) {} void PUSH_RANGE(const char *name) {} void POP_RANGE() {} #endif // NVTX_ENABLED } // end namespace ML
14d14e30a1abadc9c053bbb3d24090da6bbe4f1a.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdint.h> #include <stdlib.h> #include <mutex> #include <string> #include <unordered_map> #include "nvtx.hpp" namespace ML { /** * @brief An internal struct to store associated state with the color * generator */ struct ColorGenState { /** collection of all tagged colors generated so far */ static std::unordered_map<std::string, uint32_t> allColors; /** mutex for accessing the above map */ static std::mutex mapMutex; /** saturation */ static constexpr float S = 0.9f; /** value */ static constexpr float V = 0.85f; /** golden ratio */ static constexpr float Phi = 1.61803f; /** inverse golden ratio */ static constexpr float InvPhi = 1.f / Phi; }; std::unordered_map<std::string, uint32_t> ColorGenState::allColors; std::mutex ColorGenState::mapMutex; // all h, s, v are in range [0, 1] // Ref: http://en.wikipedia.org/wiki/HSL_and_HSV#Converting_to_RGB uint32_t hsv2rgb(float h, float s, float v) { uint32_t out = 0xff000000u; if (s <= 0.0f) { return out; } // convert hue from [0, 1] range to [0, 360] float h_deg = h * 360.f; if (0.f > h_deg || h_deg >= 360.f) h_deg = 0.f; h_deg /= 60.f; int h_range = (int)h_deg; float h_mod = h_deg - h_range; float x = v * (1.f - s); float y = v * (1.f - (s * h_mod)); float z = v * (1.f - (s * (1.f - h_mod))); float r, g, b; switch (h_range) { case 0: r = v; g = z; b = x; break; case 1: r = y; g = v; b = x; break; case 2: r = x; g = v; b = z; break; case 3: r = x; g = y; b = v; break; case 4: r = z; g = x; b = v; break; case 5: default: r = v; g = x; b = y; break; } out |= (uint32_t(r * 256.f) << 16); out |= (uint32_t(g * 256.f) << 8); out |= uint32_t(b * 256.f); return out; } /** * @brief Helper method to generate 'visually distinct' colors. * Inspired from https://martin.ankerl.com/2009/12/09/how-to-create-random-colors-programmatically/ * However, if an associated tag is passed, it will look up in its history for * any generated color against this tag and if found, just returns it, else * generates a new color, assigns a tag to it and stores it for future usage. * Such a thing is very useful for nvtx markers where the ranges associated * with a specific tag should ideally get the same color for the purpose of * visualizing it on nsight-systems timeline. 
* @param tag look for any previously generated colors with this tag or * associate the currently generated color with it * @return returns 32b RGB integer with alpha channel set of 0xff */ uint32_t generateNextColor(const std::string &tag) { std::lock_guard<std::mutex> guard(ColorGenState::mapMutex); if (!tag.empty()) { auto itr = ColorGenState::allColors.find(tag); if (itr != ColorGenState::allColors.end()) { return itr->second; } } float h = rand() * 1.f / RAND_MAX; h += ColorGenState::InvPhi; if (h >= 1.f) h -= 1.f; auto rgb = hsv2rgb(h, ColorGenState::S, ColorGenState::V); if (!tag.empty()) { ColorGenState::allColors[tag] = rgb; } return rgb; } #ifdef NVTX_ENABLED #include <nvToolsExt.h> nvtxDomainHandle_t domain = nvtxDomainCreateA("cuml_cpp"); void PUSH_RANGE(const char *name, cudaStream_t stream) { CUDA_CHECK(cudaStreamSynchronize(stream)); PUSH_RANGE(name); } void POP_RANGE(cudaStream_t stream) { CUDA_CHECK(cudaStreamSynchronize(stream)); POP_RANGE(); } void PUSH_RANGE(const char *name) { nvtxEventAttributes_t eventAttrib = {0}; eventAttrib.version = NVTX_VERSION; eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; eventAttrib.colorType = NVTX_COLOR_ARGB; eventAttrib.color = generateNextColor(name); eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; eventAttrib.message.ascii = name; nvtxDomainRangePushEx(domain, &eventAttrib); } void POP_RANGE() { nvtxDomainRangePop(domain); } #else // NVTX_ENABLED void PUSH_RANGE(const char *name, cudaStream_t stream) {} void POP_RANGE(cudaStream_t stream) {} void PUSH_RANGE(const char *name) {} void POP_RANGE() {} #endif // NVTX_ENABLED } // end namespace ML
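The interface this file implements is the PUSH_RANGE/POP_RANGE pair, with overloads that synchronize the given stream before pushing or popping so the range boundaries line up with completed device work; generateNextColor keeps ranges with the same label the same color across the timeline. A hedged usage sketch follows, assuming nvtx.hpp declares these functions inside namespace ML; the calling function is hypothetical.

#include <cuda_runtime.h>
#include "nvtx.hpp"

void train_model(cudaStream_t stream)
{
  ML::PUSH_RANGE("train_model");                  // outer range; color cached per label
  // ... enqueue kernels on `stream` ...
  ML::PUSH_RANGE("train_model::reduce", stream);  // synchronizes `stream`, then pushes
  // ... reduction phase ...
  ML::POP_RANGE(stream);                          // synchronizes again, then pops
  ML::POP_RANGE();                                // closes the outer range
}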
0664ae5707a3b3a10a5a0b2215aa370962d27030.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> // keeping THC headers for gpuAtomicAdd #include <THH/THHAtomics.cuh> #include <thrust/pair.h> namespace at { namespace native { namespace { using at::cuda::detail::canUse32BitIndexMath; __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping1d( int64_t input_w, int64_t output_w, int64_t output_x, int64_t pad_l) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_w; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_w; auto i_start_x = ::max(int64_t(0), -pad_l); auto o_start_x = ::max(int64_t(0), pad_l); int64_t input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_w + pad_l - 1)) - output_x + 2 * pad_l + input_w - 1 - o_start_x + i_start_x; return thrust::make_pair<int64_t, int64_t>( input_offset + input_x, output_offset + output_x); } __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping2d( int64_t input_dim_x, int64_t input_dim_y, int64_t output_dim_x, int64_t output_dim_y, int64_t pad_l, int64_t pad_t, int64_t output_xy) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_dim_x * input_dim_y; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_dim_x * output_dim_y; auto output_x = output_xy % output_dim_x; auto output_y = output_xy / output_dim_x; auto i_start_x = ::max(int64_t(0), -pad_l); auto i_start_y = ::max(int64_t(0), -pad_t); auto o_start_x = ::max(int64_t(0), pad_l); auto o_start_y = ::max(int64_t(0), pad_t); auto input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_dim_x + pad_l - 1)) - output_x + 2 * pad_l + input_dim_x - 1 - o_start_x + i_start_x; auto input_y = ::abs(output_y - pad_t) - ::abs(output_y - (input_dim_y + pad_t - 1)) - output_y + 2 * pad_t + input_dim_y - 1 - o_start_y + i_start_y; return thrust::make_pair<int64_t, int64_t>( input_offset + input_y * input_dim_x + input_x, output_offset + output_y * output_dim_x + output_x); } template<typename scalar_t> __global__ void reflection_pad1d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); output[index_pair.second] = input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad1d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); gpuAtomicAdd( &grad_input[index_pair.first], grad_output[index_pair.second]); } } template<typename scalar_t> __global__ void reflection_pad2d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, 
output_dim_x, output_dim_y, pad_l, pad_t, output_xy); output[index_pair.second] = input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad2d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, output_dim_x, output_dim_y, pad_l, pad_t, output_xy); gpuAtomicAdd(&grad_input[index_pair.first], grad_output[index_pair.second]); } } void reflection_pad1d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; TORCH_CHECK( (input_.ndimension() == 2 && input_.size(1) != 0) || (input_.ndimension() == 3 && input_.size(1) != 0 && input_.size(2) != 0), "2D or 3D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 3) { nbatch = input_.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input_.size(dim_plane); int64_t input_w = input_.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less " "than the corresponding input dimension, but got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_); TORCH_CHECK(output_w >= 1, "input (W: ", input_w, ")is too small. Calculated output W: ", output_w); if (input_.ndimension() == 2) { output.resize_({nplane, output_w}); } else { output.resize_({nbatch, nplane, output_w}); } if (output.numel() == 0) { return; } dim3 block_size(output_w > 256 ? 256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); Tensor input = input_.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "reflection_pad1d_out_template", [&] { hipLaunchKernelGGL(( reflection_pad1d_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); } ); AT_CUDA_CHECK(hipGetLastError()); } void reflection_pad1d_backward_out_template( Tensor & grad_input, const Tensor & grad_output_, const Tensor & input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; if (input.ndimension() == 3) { nbatch = input.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input.size(dim_plane); int64_t input_w = input.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; Tensor grad_output = grad_output_.contiguous(); TORCH_CHECK(output_w == grad_output.size(dim_w), "gradOutput width unexpected. Expected: ", output_w, ", Got: ", grad_output.size(dim_w)); dim3 block_size(output_w > 256 ? 
256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_input.scalar_type(), "reflection_pad1d_backward_out_template", [&] { hipLaunchKernelGGL(( reflection_pad1d_backward_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); } ); AT_CUDA_CHECK(hipGetLastError()); } void reflection_pad2d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; bool valid_dims = input_.size(1) != 0 && input_.size(2) != 0; TORCH_CHECK( (input_.ndimension() == 3 && valid_dims) || (input_.ndimension() == 4 && valid_dims && input_.size(3) != 0), "3D or 4D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 4) { nbatch = input_.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input_.size(plane_dim); int input_h = input_.size(dim_h); int input_w = input_.size(dim_w); TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_.sizes()); TORCH_CHECK(pad_t < input_h && pad_b < input_h, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_t, ", ", pad_b, ") at dimension ", dim_h, " of input ", input_.sizes()); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w >= 1 || output_h >= 1, "input (H: ", input_h, ", W: ", input_w, ")is too small. Calculated " "output H: ", output_h, " W: ", output_w); if (input_.ndimension() == 3) { output.resize_({nplane, output_h, output_w}); } else { output.resize_({nbatch, nplane, output_h, output_w}); } if (output.numel() == 0) { return; } Tensor input = input_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 
256 : output_plane_size); dim3 grid_size( (int) ::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "reflection_pad2d_out_template", [&] { hipLaunchKernelGGL(( reflection_pad2d_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); } ); AT_CUDA_CHECK(hipGetLastError()); } void reflection_pad2d_backward_out_template( Tensor &grad_input, const Tensor &grad_output_, const Tensor &input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "output gradient tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; if (input.ndimension() == 4) { nbatch = input.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input.size(plane_dim); int input_h = input.size(dim_h); int input_w = input.size(dim_w); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w == grad_output_.size(dim_w), "grad_output width " "unexpected. Expected: ", output_w, ", Got: ", grad_output_.size(dim_w)); TORCH_CHECK(output_h == grad_output_.size(dim_h), "grad_output height " "unexpected. Expected: ", output_h, ", Got: ", grad_output_.size(dim_h)); Tensor grad_output = grad_output_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size); dim3 grid_size( (int) ::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "reflection_pad2d_backward_out_template", [&] { hipLaunchKernelGGL(( reflection_pad2d_backward_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); } ); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& reflection_pad1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef padding) { reflection_pad1d_out_template(output, input, padding); return output; } Tensor reflection_pad1d_cuda(const Tensor& input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad1d_out_template(output, input, padding); return output; } Tensor& reflection_pad1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad1d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor& reflection_pad2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef padding) { 
reflection_pad2d_out_template(output, input, padding); return output; } Tensor reflection_pad2d_cuda(const Tensor& input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad2d_out_template(output, input, padding); return output; } Tensor& reflection_pad2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad2d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } } // namespace native } // namespace at
0664ae5707a3b3a10a5a0b2215aa370962d27030.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> // keeping THC headers for gpuAtomicAdd #include <THC/THCAtomics.cuh> #include <thrust/pair.h> namespace at { namespace native { namespace { using at::cuda::detail::canUse32BitIndexMath; __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping1d( int64_t input_w, int64_t output_w, int64_t output_x, int64_t pad_l) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_w; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_w; auto i_start_x = ::max(int64_t(0), -pad_l); auto o_start_x = ::max(int64_t(0), pad_l); int64_t input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_w + pad_l - 1)) - output_x + 2 * pad_l + input_w - 1 - o_start_x + i_start_x; return thrust::make_pair<int64_t, int64_t>( input_offset + input_x, output_offset + output_x); } __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping2d( int64_t input_dim_x, int64_t input_dim_y, int64_t output_dim_x, int64_t output_dim_y, int64_t pad_l, int64_t pad_t, int64_t output_xy) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_dim_x * input_dim_y; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_dim_x * output_dim_y; auto output_x = output_xy % output_dim_x; auto output_y = output_xy / output_dim_x; auto i_start_x = ::max(int64_t(0), -pad_l); auto i_start_y = ::max(int64_t(0), -pad_t); auto o_start_x = ::max(int64_t(0), pad_l); auto o_start_y = ::max(int64_t(0), pad_t); auto input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_dim_x + pad_l - 1)) - output_x + 2 * pad_l + input_dim_x - 1 - o_start_x + i_start_x; auto input_y = ::abs(output_y - pad_t) - ::abs(output_y - (input_dim_y + pad_t - 1)) - output_y + 2 * pad_t + input_dim_y - 1 - o_start_y + i_start_y; return thrust::make_pair<int64_t, int64_t>( input_offset + input_y * input_dim_x + input_x, output_offset + output_y * output_dim_x + output_x); } template<typename scalar_t> __global__ void reflection_pad1d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); output[index_pair.second] = input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad1d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); gpuAtomicAdd( &grad_input[index_pair.first], grad_output[index_pair.second]); } } template<typename scalar_t> __global__ void reflection_pad2d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, output_dim_x, output_dim_y, pad_l, pad_t, output_xy); output[index_pair.second] = 
input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad2d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, output_dim_x, output_dim_y, pad_l, pad_t, output_xy); gpuAtomicAdd(&grad_input[index_pair.first], grad_output[index_pair.second]); } } void reflection_pad1d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; TORCH_CHECK( (input_.ndimension() == 2 && input_.size(1) != 0) || (input_.ndimension() == 3 && input_.size(1) != 0 && input_.size(2) != 0), "2D or 3D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 3) { nbatch = input_.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input_.size(dim_plane); int64_t input_w = input_.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less " "than the corresponding input dimension, but got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_); TORCH_CHECK(output_w >= 1, "input (W: ", input_w, ")is too small. Calculated output W: ", output_w); if (input_.ndimension() == 2) { output.resize_({nplane, output_w}); } else { output.resize_({nbatch, nplane, output_w}); } if (output.numel() == 0) { return; } dim3 block_size(output_w > 256 ? 256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); Tensor input = input_.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "reflection_pad1d_out_template", [&] { reflection_pad1d_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); } ); AT_CUDA_CHECK(cudaGetLastError()); } void reflection_pad1d_backward_out_template( Tensor & grad_input, const Tensor & grad_output_, const Tensor & input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; if (input.ndimension() == 3) { nbatch = input.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input.size(dim_plane); int64_t input_w = input.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; Tensor grad_output = grad_output_.contiguous(); TORCH_CHECK(output_w == grad_output.size(dim_w), "gradOutput width unexpected. Expected: ", output_w, ", Got: ", grad_output.size(dim_w)); dim3 block_size(output_w > 256 ? 
256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_input.scalar_type(), "reflection_pad1d_backward_out_template", [&] { reflection_pad1d_backward_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); } ); AT_CUDA_CHECK(cudaGetLastError()); } void reflection_pad2d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; bool valid_dims = input_.size(1) != 0 && input_.size(2) != 0; TORCH_CHECK( (input_.ndimension() == 3 && valid_dims) || (input_.ndimension() == 4 && valid_dims && input_.size(3) != 0), "3D or 4D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 4) { nbatch = input_.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input_.size(plane_dim); int input_h = input_.size(dim_h); int input_w = input_.size(dim_w); TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_.sizes()); TORCH_CHECK(pad_t < input_h && pad_b < input_h, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_t, ", ", pad_b, ") at dimension ", dim_h, " of input ", input_.sizes()); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w >= 1 || output_h >= 1, "input (H: ", input_h, ", W: ", input_w, ")is too small. Calculated " "output H: ", output_h, " W: ", output_w); if (input_.ndimension() == 3) { output.resize_({nplane, output_h, output_w}); } else { output.resize_({nbatch, nplane, output_h, output_w}); } if (output.numel() == 0) { return; } Tensor input = input_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 
256 : output_plane_size); dim3 grid_size( (int) std::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "reflection_pad2d_out_template", [&] { reflection_pad2d_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); } ); AT_CUDA_CHECK(cudaGetLastError()); } void reflection_pad2d_backward_out_template( Tensor &grad_input, const Tensor &grad_output_, const Tensor &input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "output gradient tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; if (input.ndimension() == 4) { nbatch = input.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input.size(plane_dim); int input_h = input.size(dim_h); int input_w = input.size(dim_w); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w == grad_output_.size(dim_w), "grad_output width " "unexpected. Expected: ", output_w, ", Got: ", grad_output_.size(dim_w)); TORCH_CHECK(output_h == grad_output_.size(dim_h), "grad_output height " "unexpected. Expected: ", output_h, ", Got: ", grad_output_.size(dim_h)); Tensor grad_output = grad_output_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size); dim3 grid_size( (int) std::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "reflection_pad2d_backward_out_template", [&] { reflection_pad2d_backward_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); } ); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& reflection_pad1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef padding) { reflection_pad1d_out_template(output, input, padding); return output; } Tensor reflection_pad1d_cuda(const Tensor& input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad1d_out_template(output, input, padding); return output; } Tensor& reflection_pad1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad1d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor& reflection_pad2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef padding) { reflection_pad2d_out_template(output, input, padding); return output; } Tensor reflection_pad2d_cuda(const Tensor& 
input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad2d_out_template(output, input, padding); return output; } Tensor& reflection_pad2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad2d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } } // namespace native } // namespace at
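// Note (illustrative sketch, not part of either file above): the reflection
// index arithmetic in get_index_mapping1d can be checked on the host. The
// function below copies only that formula, under the same variable names;
// everything else here (main, the sample sizes) is illustration, not PyTorch API.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static int64_t reflect_input_x(int64_t input_w, int64_t pad_l, int64_t output_x) {
    // Same expression as the kernel: fold output_x back into [0, input_w).
    int64_t i_start_x = std::max(int64_t(0), -pad_l);
    int64_t o_start_x = std::max(int64_t(0), pad_l);
    return std::abs(output_x - pad_l)
         - std::abs(output_x - (input_w + pad_l - 1))
         - output_x + 2 * pad_l + input_w - 1 - o_start_x + i_start_x;
}

int main() {
    // input_w = 4, pad_l = pad_r = 2: output positions 0..7 should map to
    // input indices 2 1 0 1 2 3 2 1 (reflection without repeating the edge).
    for (int64_t x = 0; x < 8; ++x)
        std::printf("%lld ", (long long)reflect_input_x(4, 2, x));
    std::printf("\n");
    return 0;
}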
65ba7c094a0cace7887801925893c114f87b33db.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <raft/random/rng.cuh> #include <sparse/op/sort.h> #include <raft/mr/device/allocator.hpp> #include <iostream> namespace raft { namespace sparse { template <typename T> struct SparseSortInput { int m, n, nnz; unsigned long long int seed; }; template <typename T> class SparseSortTest : public ::testing::TestWithParam<SparseSortInput<T>> { protected: void SetUp() override {} void TearDown() override {} protected: SparseSortInput<T> params; }; const std::vector<SparseSortInput<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef SparseSortTest<float> COOSort; TEST_P(COOSort, Result) { int *in_rows, *in_cols, *verify; float *in_vals; params = ::testing::TestWithParam<SparseSortInput<float>>::GetParam(); raft::random::Rng r(params.seed); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); std::shared_ptr<raft::mr::device::allocator> alloc( new raft::mr::device::default_allocator); raft::allocate(in_vals, params.nnz); r.uniform(in_vals, params.nnz, float(-1.0), float(1.0), stream); int *in_rows_h = (int *)malloc(params.nnz * sizeof(int)); int *in_cols_h = (int *)malloc(params.nnz * sizeof(int)); int *verify_h = (int *)malloc(params.nnz * sizeof(int)); for (int i = 0; i < params.nnz; i++) { in_rows_h[i] = params.nnz - i - 1; verify_h[i] = i; in_cols_h[i] = i; } raft::allocate(in_rows, params.nnz); raft::allocate(in_cols, params.nnz); raft::allocate(verify, params.nnz); raft::update_device(in_rows, in_rows_h, params.nnz, stream); raft::update_device(in_cols, in_cols_h, params.nnz, stream); raft::update_device(verify, verify_h, params.nnz, stream); op::coo_sort(params.m, params.n, params.nnz, in_rows, in_cols, in_vals, alloc, stream); ASSERT_TRUE( raft::devArrMatch<int>(verify, in_rows, params.nnz, raft::Compare<int>())); delete[] in_rows_h; delete[] in_cols_h; delete[] verify_h; CUDA_CHECK(hipFree(in_rows)); CUDA_CHECK(hipFree(in_cols)); CUDA_CHECK(hipFree(in_vals)); CUDA_CHECK(hipFree(verify)); CUDA_CHECK(hipStreamDestroy(stream)); } INSTANTIATE_TEST_CASE_P(SparseSortTest, COOSort, ::testing::ValuesIn(inputsf)); } // namespace sparse } // namespace raft
65ba7c094a0cace7887801925893c114f87b33db.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <raft/random/rng.cuh> #include <sparse/op/sort.h> #include <raft/mr/device/allocator.hpp> #include <iostream> namespace raft { namespace sparse { template <typename T> struct SparseSortInput { int m, n, nnz; unsigned long long int seed; }; template <typename T> class SparseSortTest : public ::testing::TestWithParam<SparseSortInput<T>> { protected: void SetUp() override {} void TearDown() override {} protected: SparseSortInput<T> params; }; const std::vector<SparseSortInput<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef SparseSortTest<float> COOSort; TEST_P(COOSort, Result) { int *in_rows, *in_cols, *verify; float *in_vals; params = ::testing::TestWithParam<SparseSortInput<float>>::GetParam(); raft::random::Rng r(params.seed); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); std::shared_ptr<raft::mr::device::allocator> alloc( new raft::mr::device::default_allocator); raft::allocate(in_vals, params.nnz); r.uniform(in_vals, params.nnz, float(-1.0), float(1.0), stream); int *in_rows_h = (int *)malloc(params.nnz * sizeof(int)); int *in_cols_h = (int *)malloc(params.nnz * sizeof(int)); int *verify_h = (int *)malloc(params.nnz * sizeof(int)); for (int i = 0; i < params.nnz; i++) { in_rows_h[i] = params.nnz - i - 1; verify_h[i] = i; in_cols_h[i] = i; } raft::allocate(in_rows, params.nnz); raft::allocate(in_cols, params.nnz); raft::allocate(verify, params.nnz); raft::update_device(in_rows, in_rows_h, params.nnz, stream); raft::update_device(in_cols, in_cols_h, params.nnz, stream); raft::update_device(verify, verify_h, params.nnz, stream); op::coo_sort(params.m, params.n, params.nnz, in_rows, in_cols, in_vals, alloc, stream); ASSERT_TRUE( raft::devArrMatch<int>(verify, in_rows, params.nnz, raft::Compare<int>())); delete[] in_rows_h; delete[] in_cols_h; delete[] verify_h; CUDA_CHECK(cudaFree(in_rows)); CUDA_CHECK(cudaFree(in_cols)); CUDA_CHECK(cudaFree(in_vals)); CUDA_CHECK(cudaFree(verify)); CUDA_CHECK(cudaStreamDestroy(stream)); } INSTANTIATE_TEST_CASE_P(SparseSortTest, COOSort, ::testing::ValuesIn(inputsf)); } // namespace sparse } // namespace raft
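// Note (illustrative sketch, not part of the test above): COOSort feeds rows
// filled with nnz-1 ... 0 into op::coo_sort and asserts they come back as
// 0 ... nnz-1 (the contents of verify). The same row-major ordering invariant,
// reproduced on the host with a sorted index permutation:
#include <algorithm>
#include <cassert>
#include <numeric>
#include <vector>

int main() {
    const int nnz = 5;
    std::vector<int> rows(nnz), cols(nnz), perm(nnz);
    for (int i = 0; i < nnz; ++i) { rows[i] = nnz - i - 1; cols[i] = i; }

    // Order the triplets by row, then column, mirroring a row-major COO sort.
    std::iota(perm.begin(), perm.end(), 0);
    std::sort(perm.begin(), perm.end(), [&](int a, int b) {
        return rows[a] != rows[b] ? rows[a] < rows[b] : cols[a] < cols[b];
    });

    for (int i = 0; i < nnz; ++i)
        assert(rows[perm[i]] == i);   // matches the device-side verify[] check
    return 0;
}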
e18f8f4d3bf4edad074015c3a992280232696fb6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N 512
#define BLOCK_DIM 32
#include <stdio.h>

int a[N][N], b[N][N], c[N][N];

// The device buffers are flat int arrays, so index them as row * N + col.
__global__ void add(int *a, int *b, int *c){
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    int col = threadIdx.y + blockDim.y * blockIdx.y;
    if (row < N && col < N)
        c[row * N + col] = a[row * N + col] + b[row * N + col];
}

int main(){
    int *dev_a, *dev_b, *dev_c;
    hipMalloc((void **) &dev_a, N * N * sizeof(int));
    hipMalloc((void **) &dev_b, N * N * sizeof(int));
    hipMalloc((void **) &dev_c, N * N * sizeof(int));

    for(int j = 0; j < N; j++){
        for(int m = 0; m < N; m++){
            a[j][m] = 2;
            b[j][m] = 3;
        }
    }

    // The host matrices are contiguous, so a flat copy of N*N ints suffices.
    hipMemcpy(dev_a, a, N * N * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N * N * sizeof(int), hipMemcpyHostToDevice);

    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((int)ceil(N / (float)dimBlock.x), (int)ceil(N / (float)dimBlock.y));
    hipLaunchKernelGGL(( add), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_a, dev_b, dev_c);

    // Copy the full N x N result back before printing.
    hipMemcpy(c, dev_c, N * N * sizeof(int), hipMemcpyDeviceToHost);

    for(int j = 0; j <= 5; j++){
        for(int m = 0; m < N; m++){
            printf("\n %d", c[j][m]);
        }
    }

    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
}
e18f8f4d3bf4edad074015c3a992280232696fb6.cu
#define N 512
#define BLOCK_DIM 32
#include <stdio.h>

int a[N][N], b[N][N], c[N][N];

// The device buffers are flat int arrays, so index them as row * N + col.
__global__ void add(int *a, int *b, int *c){
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    int col = threadIdx.y + blockDim.y * blockIdx.y;
    if (row < N && col < N)
        c[row * N + col] = a[row * N + col] + b[row * N + col];
}

int main(){
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **) &dev_a, N * N * sizeof(int));
    cudaMalloc((void **) &dev_b, N * N * sizeof(int));
    cudaMalloc((void **) &dev_c, N * N * sizeof(int));

    for(int j = 0; j < N; j++){
        for(int m = 0; m < N; m++){
            a[j][m] = 2;
            b[j][m] = 3;
        }
    }

    // The host matrices are contiguous, so a flat copy of N*N ints suffices.
    cudaMemcpy(dev_a, a, N * N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * N * sizeof(int), cudaMemcpyHostToDevice);

    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((int)ceil(N / (float)dimBlock.x), (int)ceil(N / (float)dimBlock.y));
    add<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c);

    // Copy the full N x N result back before printing.
    cudaMemcpy(c, dev_c, N * N * sizeof(int), cudaMemcpyDeviceToHost);

    for(int j = 0; j <= 5; j++){
        for(int m = 0; m < N; m++){
            printf("\n %d", c[j][m]);
        }
    }

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
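// Note (illustrative sketch, not part of either file above): the example uses
// a single flat cudaMemcpy/hipMemcpy because the host matrices are contiguous.
// If a pitched, row-by-row copy is really wanted, cudaMemcpy2D needs explicit
// pitch arguments; a minimal self-contained sketch of that variant (CUDA
// spelling, sizes reused from the example purely for illustration):
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int n = 512;
    static int host_a[n][n];               // contiguous host matrix
    size_t pitch = 0;
    int *dev_a = nullptr;

    cudaMallocPitch((void **)&dev_a, &pitch, n * sizeof(int), n);  // padded rows
    cudaMemcpy2D(dev_a, pitch,              // dst pointer, dst pitch in bytes
                 host_a, n * sizeof(int),   // src pointer, src pitch in bytes
                 n * sizeof(int), n,        // width in bytes, height in rows
                 cudaMemcpyHostToDevice);

    // Kernels must then step through rows via the pitch, e.g.
    //   int *row = (int *)((char *)dev_a + r * pitch);
    std::printf("row pitch reported by the device: %zu bytes\n", pitch);

    cudaFree(dev_a);
    return 0;
}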
a577ad35cb57b8cdcbb21020d37aa44700f4dad5.hip
// !!! This is a file automatically generated by hipify!!! #include "object/material/checkerboard_material.hpp" using namespace px; BaseCheckerboardMaterial::BaseCheckerboardMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, PREC const &dim_scale, PREC const &color_scale) : _ambient(ambient), _diffuse(diffuse), _specular(specular), _shininessonent(shininessonent), _transmissive(transmissive), _refractive_index(refractive_index), _dim_scale(dim_scale), _color_scale(color_scale) {} PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getAmbient(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseCheckerboardMaterial*>(obj); if (((u > 0 ? ::fmod(u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : ::fmod(-u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5) ^ (v > 0 ? ::fmod(v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : ::fmod(-v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5)) == 1) return o->_ambient; return o->_ambient*o->_color_scale; } PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getDiffuse(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseCheckerboardMaterial*>(obj); if (((u > 0 ? ::fmod(u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : ::fmod(-u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5) ^ (v > 0 ? ::fmod(v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : ::fmod(-v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5)) == 1) return o->_diffuse; return o->_diffuse*o->_color_scale; } PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getSpecular(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_specular; } PX_CUDA_CALLABLE PREC BaseCheckerboardMaterial::getShininess(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_shininessonent; } PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getTransmissive(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_transmissive; } PX_CUDA_CALLABLE PREC BaseCheckerboardMaterial::getRefractiveIndex(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_refractive_index; } void BaseCheckerboardMaterial::setAmbient(Light const &ambient) { _ambient = ambient; } void BaseCheckerboardMaterial::setDiffuse(Light const &diffuse) { _diffuse = diffuse; } void BaseCheckerboardMaterial::setSpecular(Light const &specular) { _specular = specular; } void BaseCheckerboardMaterial::setShininess(PREC const &shininess) { _shininessonent = shininess; } void BaseCheckerboardMaterial::setTransmissive(Light const &transmissive) { _transmissive = transmissive; } void BaseCheckerboardMaterial::setRefractiveIndex(PREC const &ior) { _refractive_index = ior; } void BaseCheckerboardMaterial::setDimScale(PREC const &scale) { _dim_scale = scale; } void BaseCheckerboardMaterial::setColorScale(PREC const &scale) { _color_scale = scale; } std::shared_ptr<BaseMaterial> CheckerboardMaterial::create(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, PREC const 
&dim_scale, PREC const &color_scale) { return std::shared_ptr<BaseMaterial>(new CheckerboardMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, dim_scale, color_scale)); } CheckerboardMaterial::CheckerboardMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, PREC const &dim_scale, PREC const &color_scale) : BaseMaterial(), _obj(new BaseCheckerboardMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, dim_scale, color_scale)), _gpu_obj(nullptr), _need_upload(true) {} CheckerboardMaterial::~CheckerboardMaterial() { delete _obj; #ifdef USE_ROCM clearGpuData(); #endif } #ifdef USE_ROCM __device__ fnAmbient_t __fn_ambient_checkerboard_material = BaseCheckerboardMaterial::getAmbient; __device__ fnDiffuse_t __fn_diffuse_checkerboard_material = BaseCheckerboardMaterial::getDiffuse; __device__ fnSpecular_t __fn_specular_checkerboard_material = BaseCheckerboardMaterial::getSpecular; __device__ fnShininess_t __fn_shininess_checkerboard_material = BaseCheckerboardMaterial::getShininess; __device__ fnTransmissive_t __fn_transmissive_checkerboard_material = BaseCheckerboardMaterial::getTransmissive; __device__ fnRefractiveIndex_t __fn_refractive_index_checkerboard_material = BaseCheckerboardMaterial::getRefractiveIndex; #endif void CheckerboardMaterial::up2Gpu() { #ifdef USE_ROCM static fnAmbient_t fn_ambient_h = nullptr; static fnDiffuse_t fn_diffuse_h; static fnSpecular_t fn_specular_h; static fnShininess_t fn_shininess_h; static fnTransmissive_t fn_transmissive_h; static fnRefractiveIndex_t fn_refractive_index_h; if (_need_upload) { if (dev_ptr == nullptr) { PX_CUDA_CHECK(hipMalloc(&_gpu_obj, sizeof(BaseCheckerboardMaterial))); PX_CUDA_CHECK(hipMalloc(&dev_ptr, sizeof(MaterialObj))); } if (fn_ambient_h == nullptr) { PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_ambient_h, __fn_ambient_checkerboard_material, sizeof(fnAmbient_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_diffuse_h, __fn_diffuse_checkerboard_material, sizeof(fnDiffuse_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_specular_h, __fn_specular_checkerboard_material, sizeof(fnSpecular_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_shininess_h, __fn_shininess_checkerboard_material, sizeof(fnShininess_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_transmissive_h, __fn_transmissive_checkerboard_material, sizeof(fnTransmissive_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_refractive_index_h, __fn_refractive_index_checkerboard_material, sizeof(fnRefractiveIndex_t))); } PX_CUDA_CHECK(hipMemcpy(_gpu_obj, _obj, sizeof(BaseCheckerboardMaterial), hipMemcpyHostToDevice)); MaterialObj tmp(_gpu_obj, fn_ambient_h, fn_diffuse_h, fn_specular_h, fn_shininess_h, fn_transmissive_h, fn_refractive_index_h); PX_CUDA_CHECK(hipMemcpy(dev_ptr, &tmp, sizeof(MaterialObj), hipMemcpyHostToDevice)); _need_upload = false; } #endif } void CheckerboardMaterial::clearGpuData() { #ifdef USE_ROCM if (_gpu_obj != nullptr) { _gpu_obj = nullptr; PX_CUDA_CHECK(hipFree(_gpu_obj)); } BaseMaterial::clearGpuData(); #endif } PREC CheckerboardMaterial::Shininess(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getShininess(_obj, u, v, w); } PREC CheckerboardMaterial::refractiveIndex(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getRefractiveIndex(_obj, u, v, w); } Light CheckerboardMaterial::getAmbient(PREC const &u, PREC const &v, PREC const &w) const { return 
BaseCheckerboardMaterial::getAmbient(_obj, u, v, w); } Light CheckerboardMaterial::getDiffuse(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getDiffuse(_obj, u, v, w); } Light CheckerboardMaterial::getSpecular(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getSpecular(_obj, u, v, w); } Light CheckerboardMaterial::getTransmissive(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getTransmissive(_obj, u, v, w); } void CheckerboardMaterial::setAmbient(Light const &ambient) { _obj->setAmbient(ambient); #ifdef USE_ROCM _need_upload = true; #endif } void CheckerboardMaterial::setDiffuse(Light const &diffuse) { _obj->setDiffuse(diffuse); #ifdef USE_ROCM _need_upload = true; #endif } void CheckerboardMaterial::setSpecular(Light const &specular) { _obj->setSpecular(specular); #ifdef USE_ROCM _need_upload = true; #endif } void CheckerboardMaterial::setShininess(PREC const &shininess) { _obj->setShininess(shininess); #ifdef USE_ROCM _need_upload = true; #endif } void CheckerboardMaterial::setTransmissive(Light const &transmissive) { _obj->setTransmissive(transmissive); #ifdef USE_ROCM _need_upload = true; #endif } void CheckerboardMaterial::setRefractiveIndex(PREC const &ior) { _obj->setRefractiveIndex(ior); #ifdef USE_ROCM _need_upload = true; #endif } void CheckerboardMaterial::setDimScale(PREC const &scale) { _obj->setDimScale(scale); #ifdef USE_ROCM _need_upload = true; #endif } void CheckerboardMaterial::setColorScale(PREC const &scale) { _obj->setColorScale(scale); #ifdef USE_ROCM _need_upload = true; #endif }
a577ad35cb57b8cdcbb21020d37aa44700f4dad5.cu
#include "object/material/checkerboard_material.hpp" using namespace px; BaseCheckerboardMaterial::BaseCheckerboardMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, PREC const &dim_scale, PREC const &color_scale) : _ambient(ambient), _diffuse(diffuse), _specular(specular), _shininessonent(shininessonent), _transmissive(transmissive), _refractive_index(refractive_index), _dim_scale(dim_scale), _color_scale(color_scale) {} PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getAmbient(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseCheckerboardMaterial*>(obj); if (((u > 0 ? std::fmod(u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : std::fmod(-u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5) ^ (v > 0 ? std::fmod(v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : std::fmod(-v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5)) == 1) return o->_ambient; return o->_ambient*o->_color_scale; } PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getDiffuse(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseCheckerboardMaterial*>(obj); if (((u > 0 ? std::fmod(u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : std::fmod(-u * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5) ^ (v > 0 ? std::fmod(v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) > 0.5 : std::fmod(-v * o->_dim_scale, static_cast<decltype(o->_dim_scale)>(1.0)) <= 0.5)) == 1) return o->_diffuse; return o->_diffuse*o->_color_scale; } PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getSpecular(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_specular; } PX_CUDA_CALLABLE PREC BaseCheckerboardMaterial::getShininess(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_shininessonent; } PX_CUDA_CALLABLE Light BaseCheckerboardMaterial::getTransmissive(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_transmissive; } PX_CUDA_CALLABLE PREC BaseCheckerboardMaterial::getRefractiveIndex(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { return reinterpret_cast<BaseCheckerboardMaterial*>(obj)->_refractive_index; } void BaseCheckerboardMaterial::setAmbient(Light const &ambient) { _ambient = ambient; } void BaseCheckerboardMaterial::setDiffuse(Light const &diffuse) { _diffuse = diffuse; } void BaseCheckerboardMaterial::setSpecular(Light const &specular) { _specular = specular; } void BaseCheckerboardMaterial::setShininess(PREC const &shininess) { _shininessonent = shininess; } void BaseCheckerboardMaterial::setTransmissive(Light const &transmissive) { _transmissive = transmissive; } void BaseCheckerboardMaterial::setRefractiveIndex(PREC const &ior) { _refractive_index = ior; } void BaseCheckerboardMaterial::setDimScale(PREC const &scale) { _dim_scale = scale; } void BaseCheckerboardMaterial::setColorScale(PREC const &scale) { _color_scale = scale; } std::shared_ptr<BaseMaterial> CheckerboardMaterial::create(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, PREC const &dim_scale, PREC const &color_scale) { 
return std::shared_ptr<BaseMaterial>(new CheckerboardMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, dim_scale, color_scale)); } CheckerboardMaterial::CheckerboardMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, PREC const &dim_scale, PREC const &color_scale) : BaseMaterial(), _obj(new BaseCheckerboardMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, dim_scale, color_scale)), _gpu_obj(nullptr), _need_upload(true) {} CheckerboardMaterial::~CheckerboardMaterial() { delete _obj; #ifdef USE_CUDA clearGpuData(); #endif } #ifdef USE_CUDA __device__ fnAmbient_t __fn_ambient_checkerboard_material = BaseCheckerboardMaterial::getAmbient; __device__ fnDiffuse_t __fn_diffuse_checkerboard_material = BaseCheckerboardMaterial::getDiffuse; __device__ fnSpecular_t __fn_specular_checkerboard_material = BaseCheckerboardMaterial::getSpecular; __device__ fnShininess_t __fn_shininess_checkerboard_material = BaseCheckerboardMaterial::getShininess; __device__ fnTransmissive_t __fn_transmissive_checkerboard_material = BaseCheckerboardMaterial::getTransmissive; __device__ fnRefractiveIndex_t __fn_refractive_index_checkerboard_material = BaseCheckerboardMaterial::getRefractiveIndex; #endif void CheckerboardMaterial::up2Gpu() { #ifdef USE_CUDA static fnAmbient_t fn_ambient_h = nullptr; static fnDiffuse_t fn_diffuse_h; static fnSpecular_t fn_specular_h; static fnShininess_t fn_shininess_h; static fnTransmissive_t fn_transmissive_h; static fnRefractiveIndex_t fn_refractive_index_h; if (_need_upload) { if (dev_ptr == nullptr) { PX_CUDA_CHECK(cudaMalloc(&_gpu_obj, sizeof(BaseCheckerboardMaterial))); PX_CUDA_CHECK(cudaMalloc(&dev_ptr, sizeof(MaterialObj))); } if (fn_ambient_h == nullptr) { PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_ambient_h, __fn_ambient_checkerboard_material, sizeof(fnAmbient_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_diffuse_h, __fn_diffuse_checkerboard_material, sizeof(fnDiffuse_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_specular_h, __fn_specular_checkerboard_material, sizeof(fnSpecular_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_shininess_h, __fn_shininess_checkerboard_material, sizeof(fnShininess_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_transmissive_h, __fn_transmissive_checkerboard_material, sizeof(fnTransmissive_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_refractive_index_h, __fn_refractive_index_checkerboard_material, sizeof(fnRefractiveIndex_t))); } PX_CUDA_CHECK(cudaMemcpy(_gpu_obj, _obj, sizeof(BaseCheckerboardMaterial), cudaMemcpyHostToDevice)); MaterialObj tmp(_gpu_obj, fn_ambient_h, fn_diffuse_h, fn_specular_h, fn_shininess_h, fn_transmissive_h, fn_refractive_index_h); PX_CUDA_CHECK(cudaMemcpy(dev_ptr, &tmp, sizeof(MaterialObj), cudaMemcpyHostToDevice)); _need_upload = false; } #endif } void CheckerboardMaterial::clearGpuData() { #ifdef USE_CUDA if (_gpu_obj != nullptr) { _gpu_obj = nullptr; PX_CUDA_CHECK(cudaFree(_gpu_obj)); } BaseMaterial::clearGpuData(); #endif } PREC CheckerboardMaterial::Shininess(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getShininess(_obj, u, v, w); } PREC CheckerboardMaterial::refractiveIndex(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getRefractiveIndex(_obj, u, v, w); } Light CheckerboardMaterial::getAmbient(PREC const &u, PREC const &v, PREC const &w) const { return 
BaseCheckerboardMaterial::getAmbient(_obj, u, v, w); } Light CheckerboardMaterial::getDiffuse(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getDiffuse(_obj, u, v, w); } Light CheckerboardMaterial::getSpecular(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getSpecular(_obj, u, v, w); } Light CheckerboardMaterial::getTransmissive(PREC const &u, PREC const &v, PREC const &w) const { return BaseCheckerboardMaterial::getTransmissive(_obj, u, v, w); } void CheckerboardMaterial::setAmbient(Light const &ambient) { _obj->setAmbient(ambient); #ifdef USE_CUDA _need_upload = true; #endif } void CheckerboardMaterial::setDiffuse(Light const &diffuse) { _obj->setDiffuse(diffuse); #ifdef USE_CUDA _need_upload = true; #endif } void CheckerboardMaterial::setSpecular(Light const &specular) { _obj->setSpecular(specular); #ifdef USE_CUDA _need_upload = true; #endif } void CheckerboardMaterial::setShininess(PREC const &shininess) { _obj->setShininess(shininess); #ifdef USE_CUDA _need_upload = true; #endif } void CheckerboardMaterial::setTransmissive(Light const &transmissive) { _obj->setTransmissive(transmissive); #ifdef USE_CUDA _need_upload = true; #endif } void CheckerboardMaterial::setRefractiveIndex(PREC const &ior) { _obj->setRefractiveIndex(ior); #ifdef USE_CUDA _need_upload = true; #endif } void CheckerboardMaterial::setDimScale(PREC const &scale) { _obj->setDimScale(scale); #ifdef USE_CUDA _need_upload = true; #endif } void CheckerboardMaterial::setColorScale(PREC const &scale) { _obj->setColorScale(scale); #ifdef USE_CUDA _need_upload = true; #endif }
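// Note (illustration, not part of either file above): in clearGpuData() the
// member _gpu_obj is set to nullptr before cudaFree/hipFree is called, so the
// free receives a null pointer and the device-side BaseCheckerboardMaterial
// allocation is never released. A corrected sketch of that branch (CUDA
// spelling) frees first and clears the handle afterwards:
//
//   if (_gpu_obj != nullptr) {
//       PX_CUDA_CHECK(cudaFree(_gpu_obj));   // release the device copy
//       _gpu_obj = nullptr;                  // then drop the stale handle
//   }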
53576fe0ecaf962173ffdca98cd4e0d78b10941b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __LLSGPU_SOLVER_KERNEL_CU__ #define __LLSGPU_SOLVER_KERNEL_CU__ #define THETA_ACCUM_CNT 180 #define DATA_POINTS 6 #define F 100 #include "util.h" #include "cudautil.h" typedef struct { int x; int y; } edge_pixel_t; typedef struct { int x; int y; int a; int b; int theta; } ellipse_t; cuda_dim Solver_dim = { 64, 150 }; edge_pixel_t* h_edge_pixels = NULL; uint h_edge_pixels_cnt = 0; __device__ edge_pixel_t* d_edge_pixels = NULL; __device__ uint* d_center_accum = NULL; __device__ uint* d_axes_accum = NULL; __device__ uint* d_theta_accum = NULL; __host__ void setup_Solver() { hipError_t err; if (h_edge_pixels == NULL) { debug("process_input() should be run before setup_Solver()"); return; } err = hipMalloc((void **)&d_edge_pixels, sizeof(edge_pixel_t) * h_edge_pixels_cnt); if (err != hipSuccess) debug("%s",hipGetErrorString(err)); err = hipMemcpy(d_edge_pixels, h_edge_pixels, sizeof(edge_pixel_t) * h_edge_pixels_cnt, hipMemcpyHostToDevice); if (err != hipSuccess) debug("%s",hipGetErrorString(err)); } __host__ void setup_Solver_accum(uint width, uint height) { int size = width * height; hipError_t err; // setup the 2d center accumulator err = hipMalloc((void **)&d_center_accum, sizeof(uint) * size); if (err != hipSuccess) debug("center accum malloc: %s",hipGetErrorString(err)); err = hipMemset(d_center_accum, 0, sizeof(uint) * size); if (err != hipSuccess) debug("center accum memset: %s",hipGetErrorString(err)); // setup the 2d axes accumulator err = hipMalloc((void **)&d_axes_accum, sizeof(uint) * size); if (err != hipSuccess) debug("axes accum malloc: %s",hipGetErrorString(err)); err = hipMemset(d_axes_accum, 0, sizeof(uint) * size); if (err != hipSuccess) debug("axes accum memset: %s",hipGetErrorString(err)); // setup the 1d theta accumulator err = hipMalloc((void **)&d_theta_accum, sizeof(uint) * THETA_ACCUM_CNT); if (err != hipSuccess) debug("theta accum malloc: %s",hipGetErrorString(err)); err = hipMemset(d_theta_accum, 0, sizeof(uint) * THETA_ACCUM_CNT); if (err != hipSuccess) debug("theta accum memset: %s",hipGetErrorString(err)); } __host__ void teardown_Solver() { hipError_t err; if (d_edge_pixels != NULL) { err = hipFree(d_edge_pixels); if (err != hipSuccess) debug("d_edge_pixel free: %s",hipGetErrorString(err)); d_edge_pixels = NULL; } if (h_edge_pixels != NULL) { delete [] h_edge_pixels; h_edge_pixels = NULL; } } __host__ void teardown_Solver_accum() { hipError_t err; if (d_theta_accum != NULL) { err = hipFree(d_theta_accum); if (err != hipSuccess) debug("theta accum free: %s",hipGetErrorString(err)); d_theta_accum = NULL; } if (d_axes_accum != NULL) { err = hipFree(d_axes_accum); if (err != hipSuccess) debug("axes accum free: %s",hipGetErrorString(err)); d_axes_accum = NULL; } if (d_center_accum != NULL) { err = hipFree(d_center_accum); if (err != hipSuccess) debug("center accum free: %s",hipGetErrorString(err)); d_center_accum = NULL; } } // implemented from psudocode at: http://en.wikipedia.org/wiki/Gaussian_elimination#Pseudocode // gaussian elimination with partial pivoting // results is ordered x1,x2,x3...xn __device__ void gauss(double (*A)[DATA_POINTS], uint rows, uint cols, double* results) { int i,j,k,u,maxi; double var, t; i = 0; j = 0; while (i < rows && j < cols) { // find the row with the maximum value maxi = i; for (k=i+1; k < rows; k++) { if (fabs(A[k][j]) > fabs(A[maxi][j])) maxi = k; } if (A[maxi][j] != 0) { // swap rows if (i != maxi) { for (u=0; u < cols; u++) { t = 
A[i][u]; A[i][u] = A[maxi][u]; A[maxi][u] = t; } } // reduce pivot element to 1 var = A[i][j]; for (k=0; k < cols; k++) A[i][k] /= var; // remove the pivot element from all subsequent rows for (u=i+1; u < rows; u++) { var = A[u][j]; for (k=j; k < cols; k++) A[u][k] -= A[i][k] * var; } i++; } j++; } // retrieve the results for (i=rows-1; i >= 0; i--) { var = A[i][cols-1]; for (j=cols-2; j > i; j--) { var -= A[i][j] * results[j]; } results[i] = var; } } __device__ void accumulator_3(ellipse_t* ellipse, uint* d_center_accum, uint* d_axes_accum, uint* d_theta_accum, uint width, uint height) { uint *center_ptr, *axes_ptr, *theta_ptr, offset; center_ptr = axes_ptr = theta_ptr = NULL; offset = ellipse->y * width + ellipse->x; if (offset < width * height && ellipse->x >= 0 && ellipse->x < width && ellipse->y >= 0 && ellipse->y < height) center_ptr = &d_center_accum[offset]; offset = ellipse->b * width + ellipse->a; if (offset < width * height && ellipse->a >= 0 && ellipse->a < width && ellipse->b >= 0 && ellipse->b < height) axes_ptr = &d_axes_accum[offset]; ellipse->theta += THETA_ACCUM_CNT / 2; if (ellipse->theta >= 0 && ellipse->theta < THETA_ACCUM_CNT) theta_ptr = &d_theta_accum[ellipse->theta]; if (center_ptr != NULL && axes_ptr != NULL && theta_ptr != NULL) { //printf("%d %d: %x %x %x\t%dx%d %dx%d %d\n", blockIdx.x,threadIdx.x, center_ptr, axes_ptr, theta_ptr,ellipse->x,ellipse->y,ellipse->a,ellipse->b,ellipse->theta-90); atomicAdd(center_ptr,1); atomicAdd(axes_ptr,1); atomicAdd(theta_ptr,1); } } __global__ void Solver_kernel( uint cnt, edge_pixel_t* d_edge_pixels, uint d_edge_pixels_cnt, uint* d_rnds, uint* d_center_accum, uint* d_axes_accum, uint* d_theta_accum, uint width, uint height ) { int next_rnd, x2, y2, xy, rnd, i, j, m; double X[DATA_POINTS][5], Xt[5][DATA_POINTS], aug[5][DATA_POINTS], results[5]; double A, B, C, D, E, J, delta, t, r1, r2, slope1, slope2; edge_pixel_t* pxl; ellipse_t ellipse; // figure out how many pieces of evidence to collect next_rnd = (blockIdx.x * blockDim.x + threadIdx.x) * cnt * 6; // compute the requried evidence for(; cnt > 0; cnt--) { // generate X and Xt from random edge points for(i=0; i < DATA_POINTS; i++) { rnd = (d_rnds[next_rnd++] / (float)0xFFFFFFFF) * (d_edge_pixels_cnt); pxl = &d_edge_pixels[rnd]; x2 = pxl->x * pxl->x; y2 = pxl->y * pxl->y; xy = pxl->x * pxl->y; Xt[0][i] = X[i][0] = x2; Xt[1][i] = X[i][1] = y2; Xt[2][i] = X[i][2] = xy; Xt[3][i] = X[i][3] = pxl->x; Xt[4][i] = X[i][4] = pxl->y; } // generate the augmented matrix from X, Xt, and Y for (i=0; i < 5; i++) { for (j=0; j < 5; j++) { aug[i][j] = 0.0; for (m=0; m < 6; m++) aug[i][j] += Xt[i][m] * X[m][j]; } } for (i=0; i < 5; i++) { aug[i][5] = 0.0; for (m=0; m < 6; m++) aug[i][5] += Xt[i][m] * F; } // solve the setup general quadratic gauss(aug,5,6,results); A = results[0]; B = results[1]; C = results[2]; D = results[3]; E = results[4]; // calc j to determine if we have an ellipse J = (A * B) - ((C * C) / 4.0); // determine if we have a circle if (J > 0.0 || fabs(J) <= FP_PRE) { // recover the parameters delta = (A * B * -F) + (C * E * D) / 8 + (D * C * E) / 8 - (D * D * B) / 4 - (A * E * E) / 4 - (C * C * -F) / 4; t = sqrt((B - A) * (B - A) + C * C); r1 = (A + B + t) / 2.0; r2 = (A + B - t) / 2.0; t = (C * C - 4.0 * A * B); ellipse.x = (2.0 * B * D - C * E) / t; ellipse.y = (2.0 * A * E - C * D) / t; ellipse.a = (int)floor(sqrt(fabs(delta) / fabs(J * r2))); ellipse.b = (int)floor(sqrt(fabs(delta) / fabs(J * r1))); t = (B - A) / C; slope1 = sqrt((t * t) + 1.0) + t; slope2 = -sqrt((t * t) 
+ 1.0) + t; ellipse.theta = atan(slope2) / M_PI * 180.0; if (ellipse.b > ellipse.a) { t = ellipse.a; ellipse.a = ellipse.b; ellipse.b = t; ellipse.theta = (int)(atan(slope1) / M_PI * 180.0 + 0.5); if (C < 0) ellipse.theta += 90; } accumulator_3(&ellipse,d_center_accum,d_axes_accum,d_theta_accum,width,height); } } // make sure everybody is done before we go __syncthreads(); } #endif
53576fe0ecaf962173ffdca98cd4e0d78b10941b.cu
#ifndef __LLSGPU_SOLVER_KERNEL_CU__ #define __LLSGPU_SOLVER_KERNEL_CU__ #define THETA_ACCUM_CNT 180 #define DATA_POINTS 6 #define F 100 #include "util.h" #include "cudautil.h" typedef struct { int x; int y; } edge_pixel_t; typedef struct { int x; int y; int a; int b; int theta; } ellipse_t; cuda_dim Solver_dim = { 64, 150 }; edge_pixel_t* h_edge_pixels = NULL; uint h_edge_pixels_cnt = 0; __device__ edge_pixel_t* d_edge_pixels = NULL; __device__ uint* d_center_accum = NULL; __device__ uint* d_axes_accum = NULL; __device__ uint* d_theta_accum = NULL; __host__ void setup_Solver() { cudaError_t err; if (h_edge_pixels == NULL) { debug("process_input() should be run before setup_Solver()"); return; } err = cudaMalloc((void **)&d_edge_pixels, sizeof(edge_pixel_t) * h_edge_pixels_cnt); if (err != cudaSuccess) debug("%s",cudaGetErrorString(err)); err = cudaMemcpy(d_edge_pixels, h_edge_pixels, sizeof(edge_pixel_t) * h_edge_pixels_cnt, cudaMemcpyHostToDevice); if (err != cudaSuccess) debug("%s",cudaGetErrorString(err)); } __host__ void setup_Solver_accum(uint width, uint height) { int size = width * height; cudaError_t err; // setup the 2d center accumulator err = cudaMalloc((void **)&d_center_accum, sizeof(uint) * size); if (err != cudaSuccess) debug("center accum malloc: %s",cudaGetErrorString(err)); err = cudaMemset(d_center_accum, 0, sizeof(uint) * size); if (err != cudaSuccess) debug("center accum memset: %s",cudaGetErrorString(err)); // setup the 2d axes accumulator err = cudaMalloc((void **)&d_axes_accum, sizeof(uint) * size); if (err != cudaSuccess) debug("axes accum malloc: %s",cudaGetErrorString(err)); err = cudaMemset(d_axes_accum, 0, sizeof(uint) * size); if (err != cudaSuccess) debug("axes accum memset: %s",cudaGetErrorString(err)); // setup the 1d theta accumulator err = cudaMalloc((void **)&d_theta_accum, sizeof(uint) * THETA_ACCUM_CNT); if (err != cudaSuccess) debug("theta accum malloc: %s",cudaGetErrorString(err)); err = cudaMemset(d_theta_accum, 0, sizeof(uint) * THETA_ACCUM_CNT); if (err != cudaSuccess) debug("theta accum memset: %s",cudaGetErrorString(err)); } __host__ void teardown_Solver() { cudaError_t err; if (d_edge_pixels != NULL) { err = cudaFree(d_edge_pixels); if (err != cudaSuccess) debug("d_edge_pixel free: %s",cudaGetErrorString(err)); d_edge_pixels = NULL; } if (h_edge_pixels != NULL) { delete [] h_edge_pixels; h_edge_pixels = NULL; } } __host__ void teardown_Solver_accum() { cudaError_t err; if (d_theta_accum != NULL) { err = cudaFree(d_theta_accum); if (err != cudaSuccess) debug("theta accum free: %s",cudaGetErrorString(err)); d_theta_accum = NULL; } if (d_axes_accum != NULL) { err = cudaFree(d_axes_accum); if (err != cudaSuccess) debug("axes accum free: %s",cudaGetErrorString(err)); d_axes_accum = NULL; } if (d_center_accum != NULL) { err = cudaFree(d_center_accum); if (err != cudaSuccess) debug("center accum free: %s",cudaGetErrorString(err)); d_center_accum = NULL; } } // implemented from psudocode at: http://en.wikipedia.org/wiki/Gaussian_elimination#Pseudocode // gaussian elimination with partial pivoting // results is ordered x1,x2,x3...xn __device__ void gauss(double (*A)[DATA_POINTS], uint rows, uint cols, double* results) { int i,j,k,u,maxi; double var, t; i = 0; j = 0; while (i < rows && j < cols) { // find the row with the maximum value maxi = i; for (k=i+1; k < rows; k++) { if (fabs(A[k][j]) > fabs(A[maxi][j])) maxi = k; } if (A[maxi][j] != 0) { // swap rows if (i != maxi) { for (u=0; u < cols; u++) { t = A[i][u]; A[i][u] = A[maxi][u]; A[maxi][u] = t; } 
} // reduce pivot element to 1 var = A[i][j]; for (k=0; k < cols; k++) A[i][k] /= var; // remove the pivot element from all subsequent rows for (u=i+1; u < rows; u++) { var = A[u][j]; for (k=j; k < cols; k++) A[u][k] -= A[i][k] * var; } i++; } j++; } // retrieve the results for (i=rows-1; i >= 0; i--) { var = A[i][cols-1]; for (j=cols-2; j > i; j--) { var -= A[i][j] * results[j]; } results[i] = var; } } __device__ void accumulator_3(ellipse_t* ellipse, uint* d_center_accum, uint* d_axes_accum, uint* d_theta_accum, uint width, uint height) { uint *center_ptr, *axes_ptr, *theta_ptr, offset; center_ptr = axes_ptr = theta_ptr = NULL; offset = ellipse->y * width + ellipse->x; if (offset < width * height && ellipse->x >= 0 && ellipse->x < width && ellipse->y >= 0 && ellipse->y < height) center_ptr = &d_center_accum[offset]; offset = ellipse->b * width + ellipse->a; if (offset < width * height && ellipse->a >= 0 && ellipse->a < width && ellipse->b >= 0 && ellipse->b < height) axes_ptr = &d_axes_accum[offset]; ellipse->theta += THETA_ACCUM_CNT / 2; if (ellipse->theta >= 0 && ellipse->theta < THETA_ACCUM_CNT) theta_ptr = &d_theta_accum[ellipse->theta]; if (center_ptr != NULL && axes_ptr != NULL && theta_ptr != NULL) { //printf("%d %d: %x %x %x\t%dx%d %dx%d %d\n", blockIdx.x,threadIdx.x, center_ptr, axes_ptr, theta_ptr,ellipse->x,ellipse->y,ellipse->a,ellipse->b,ellipse->theta-90); atomicAdd(center_ptr,1); atomicAdd(axes_ptr,1); atomicAdd(theta_ptr,1); } } __global__ void Solver_kernel( uint cnt, edge_pixel_t* d_edge_pixels, uint d_edge_pixels_cnt, uint* d_rnds, uint* d_center_accum, uint* d_axes_accum, uint* d_theta_accum, uint width, uint height ) { int next_rnd, x2, y2, xy, rnd, i, j, m; double X[DATA_POINTS][5], Xt[5][DATA_POINTS], aug[5][DATA_POINTS], results[5]; double A, B, C, D, E, J, delta, t, r1, r2, slope1, slope2; edge_pixel_t* pxl; ellipse_t ellipse; // figure out how many pieces of evidence to collect next_rnd = (blockIdx.x * blockDim.x + threadIdx.x) * cnt * 6; // compute the requried evidence for(; cnt > 0; cnt--) { // generate X and Xt from random edge points for(i=0; i < DATA_POINTS; i++) { rnd = (d_rnds[next_rnd++] / (float)0xFFFFFFFF) * (d_edge_pixels_cnt); pxl = &d_edge_pixels[rnd]; x2 = pxl->x * pxl->x; y2 = pxl->y * pxl->y; xy = pxl->x * pxl->y; Xt[0][i] = X[i][0] = x2; Xt[1][i] = X[i][1] = y2; Xt[2][i] = X[i][2] = xy; Xt[3][i] = X[i][3] = pxl->x; Xt[4][i] = X[i][4] = pxl->y; } // generate the augmented matrix from X, Xt, and Y for (i=0; i < 5; i++) { for (j=0; j < 5; j++) { aug[i][j] = 0.0; for (m=0; m < 6; m++) aug[i][j] += Xt[i][m] * X[m][j]; } } for (i=0; i < 5; i++) { aug[i][5] = 0.0; for (m=0; m < 6; m++) aug[i][5] += Xt[i][m] * F; } // solve the setup general quadratic gauss(aug,5,6,results); A = results[0]; B = results[1]; C = results[2]; D = results[3]; E = results[4]; // calc j to determine if we have an ellipse J = (A * B) - ((C * C) / 4.0); // determine if we have a circle if (J > 0.0 || fabs(J) <= FP_PRE) { // recover the parameters delta = (A * B * -F) + (C * E * D) / 8 + (D * C * E) / 8 - (D * D * B) / 4 - (A * E * E) / 4 - (C * C * -F) / 4; t = sqrt((B - A) * (B - A) + C * C); r1 = (A + B + t) / 2.0; r2 = (A + B - t) / 2.0; t = (C * C - 4.0 * A * B); ellipse.x = (2.0 * B * D - C * E) / t; ellipse.y = (2.0 * A * E - C * D) / t; ellipse.a = (int)floor(sqrt(fabs(delta) / fabs(J * r2))); ellipse.b = (int)floor(sqrt(fabs(delta) / fabs(J * r1))); t = (B - A) / C; slope1 = sqrt((t * t) + 1.0) + t; slope2 = -sqrt((t * t) + 1.0) + t; ellipse.theta = atan(slope2) / M_PI 
* 180.0; if (ellipse.b > ellipse.a) { t = ellipse.a; ellipse.a = ellipse.b; ellipse.b = t; ellipse.theta = (int)(atan(slope1) / M_PI * 180.0 + 0.5); if (C < 0) ellipse.theta += 90; } accumulator_3(&ellipse,d_center_accum,d_axes_accum,d_theta_accum,width,height); } } // make sure everybody is done before we go __syncthreads(); } #endif
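// Note (illustrative sketch, not part of either file above): Solver_kernel
// fits the conic A*x^2 + B*y^2 + C*x*y + D*x + E*y = F to six sampled edge
// pixels and keeps the sample only when J = A*B - C*C/4 is positive, i.e.
// when the conic is an ellipse. A tiny host-side check of that discriminant:
#include <cstdio>

int main() {
    // Circle x^2 + y^2 = F:          A = B = 1, C = 0  ->  J =  1.00 (kept)
    // Rectangular hyperbola x*y = F: A = B = 0, C = 1  ->  J = -0.25 (rejected)
    double circle_J    = 1.0 * 1.0 - (0.0 * 0.0) / 4.0;
    double hyperbola_J = 0.0 * 0.0 - (1.0 * 1.0) / 4.0;
    std::printf("circle J = %.2f, hyperbola J = %.2f\n", circle_J, hyperbola_J);
    return 0;
}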
2e57499fb27912a5feeee306fbf1cf70d7a5e759.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void saxpy_float4s_shmem_doublebuffer ( float* y, float* x, float a, clock_t * timer_vals) { volatile __shared__ float sdata_x0_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x1_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x2_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x3_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y0_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y1_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y2_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y3_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x0_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x1_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x2_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x3_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y0_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y1_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y2_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y3_1 [COMPUTE_THREADS_PER_CTA]; int tid = threadIdx.x ; unsigned int idx0, idx1; idx0 = blockIdx.x * COMPUTE_THREADS_PER_CTA + tid; idx1 = COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + tid; float4 * x_as_float4 = (float4 *)x; float4 * y_as_float4 = (float4 *)y; float4 result_y; for (int i=0; i < NUM_ITERS/4; i+=2) { float4 tmp1_x, tmp1_y; __syncthreads(); tmp1_x = x_as_float4[idx0]; tmp1_y = y_as_float4[idx0]; if (i!=0) { result_y.x = a * sdata_x0_1[tid] + sdata_y0_1[tid]; result_y.y = a * sdata_x1_1[tid] + sdata_y1_1[tid]; result_y.z = a * sdata_x2_1[tid] + sdata_y2_1[tid]; result_y.w = a * sdata_x3_1[tid] + sdata_y3_1[tid]; y_as_float4[idx1] = result_y; idx1 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ; } sdata_x0_0[tid] = tmp1_x.x; sdata_x1_0[tid] = tmp1_x.y; sdata_x2_0[tid] = tmp1_x.z; sdata_x3_0[tid] = tmp1_x.w; sdata_y0_0[tid] = tmp1_y.x; sdata_y1_0[tid] = tmp1_y.y; sdata_y2_0[tid] = tmp1_y.z; sdata_y3_0[tid] = tmp1_y.w; __syncthreads(); tmp1_x = x_as_float4[idx1]; tmp1_y = y_as_float4[idx1]; result_y.x = a * sdata_x0_0[tid] + sdata_y0_0[tid]; result_y.y = a * sdata_x1_0[tid] + sdata_y1_0[tid]; result_y.z = a * sdata_x2_0[tid] + sdata_y2_0[tid]; result_y.w = a * sdata_x3_0[tid] + sdata_y3_0[tid]; y_as_float4[idx0] = result_y; idx0 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ; sdata_x0_1[tid] = tmp1_x.x; sdata_x1_1[tid] = tmp1_x.y; sdata_x2_1[tid] = tmp1_x.z; sdata_x3_1[tid] = tmp1_x.w; sdata_y0_1[tid] = tmp1_y.x; sdata_y1_1[tid] = tmp1_y.y; sdata_y2_1[tid] = tmp1_y.z; sdata_y3_1[tid] = tmp1_y.w; } __syncthreads(); result_y.x = a * sdata_x0_1[tid] + sdata_y0_1[tid]; result_y.y = a * sdata_x1_1[tid] + sdata_y1_1[tid]; result_y.z = a * sdata_x2_1[tid] + sdata_y2_1[tid]; result_y.w = a * sdata_x3_1[tid] + sdata_y3_1[tid]; y_as_float4[idx1] = result_y; }
2e57499fb27912a5feeee306fbf1cf70d7a5e759.cu
#include "includes.h" __global__ void saxpy_float4s_shmem_doublebuffer ( float* y, float* x, float a, clock_t * timer_vals) { volatile __shared__ float sdata_x0_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x1_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x2_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x3_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y0_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y1_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y2_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y3_0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x0_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x1_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x2_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x3_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y0_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y1_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y2_1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y3_1 [COMPUTE_THREADS_PER_CTA]; int tid = threadIdx.x ; unsigned int idx0, idx1; idx0 = blockIdx.x * COMPUTE_THREADS_PER_CTA + tid; idx1 = COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + tid; float4 * x_as_float4 = (float4 *)x; float4 * y_as_float4 = (float4 *)y; float4 result_y; for (int i=0; i < NUM_ITERS/4; i+=2) { float4 tmp1_x, tmp1_y; __syncthreads(); tmp1_x = x_as_float4[idx0]; tmp1_y = y_as_float4[idx0]; if (i!=0) { result_y.x = a * sdata_x0_1[tid] + sdata_y0_1[tid]; result_y.y = a * sdata_x1_1[tid] + sdata_y1_1[tid]; result_y.z = a * sdata_x2_1[tid] + sdata_y2_1[tid]; result_y.w = a * sdata_x3_1[tid] + sdata_y3_1[tid]; y_as_float4[idx1] = result_y; idx1 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ; } sdata_x0_0[tid] = tmp1_x.x; sdata_x1_0[tid] = tmp1_x.y; sdata_x2_0[tid] = tmp1_x.z; sdata_x3_0[tid] = tmp1_x.w; sdata_y0_0[tid] = tmp1_y.x; sdata_y1_0[tid] = tmp1_y.y; sdata_y2_0[tid] = tmp1_y.z; sdata_y3_0[tid] = tmp1_y.w; __syncthreads(); tmp1_x = x_as_float4[idx1]; tmp1_y = y_as_float4[idx1]; result_y.x = a * sdata_x0_0[tid] + sdata_y0_0[tid]; result_y.y = a * sdata_x1_0[tid] + sdata_y1_0[tid]; result_y.z = a * sdata_x2_0[tid] + sdata_y2_0[tid]; result_y.w = a * sdata_x3_0[tid] + sdata_y3_0[tid]; y_as_float4[idx0] = result_y; idx0 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ; sdata_x0_1[tid] = tmp1_x.x; sdata_x1_1[tid] = tmp1_x.y; sdata_x2_1[tid] = tmp1_x.z; sdata_x3_1[tid] = tmp1_x.w; sdata_y0_1[tid] = tmp1_y.x; sdata_y1_1[tid] = tmp1_y.y; sdata_y2_1[tid] = tmp1_y.z; sdata_y3_1[tid] = tmp1_y.w; } __syncthreads(); result_y.x = a * sdata_x0_1[tid] + sdata_y0_1[tid]; result_y.y = a * sdata_x1_1[tid] + sdata_y1_1[tid]; result_y.z = a * sdata_x2_1[tid] + sdata_y2_1[tid]; result_y.w = a * sdata_x3_1[tid] + sdata_y3_1[tid]; y_as_float4[idx1] = result_y; }
aaca8e52beaede943d96c8cc02835d47be7e5df7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/relu6_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLU6Forward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; out[index] = out[index] < Dtype(6.) ? out[index] : Dtype(6.); } } template <typename Dtype> void ReLU6Layer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu6_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLU6Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLU6Backward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope) * (in_data[index] < Dtype(6.)); } } template <typename Dtype> void ReLU6Layer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu6_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLU6Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLU6Layer); } // namespace caffe
aaca8e52beaede943d96c8cc02835d47be7e5df7.cu
#include <algorithm> #include <vector> #include "caffe/layers/relu6_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLU6Forward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; out[index] = out[index] < Dtype(6.) ? out[index] : Dtype(6.); } } template <typename Dtype> void ReLU6Layer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu6_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) ReLU6Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLU6Backward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope) * (in_data[index] < Dtype(6.)); } } template <typename Dtype> void ReLU6Layer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu6_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) ReLU6Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLU6Layer); } // namespace caffe
da1db010f289177594ba31f641440155425fb9c6.hip
// !!! This is a file automatically generated by hipify!!! /* * University of Illinois Open Source License * Copyright 2012-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Mike Hallock */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <string.h> #include <stdio.h> #include <math.h> #include "config.h" #include "SegmentDescriptor.h" #include "MultiGPUMapper.h" #include <pthread.h> #ifdef MPD_NUMA_SUPPORT #include <numa.h> #endif #include "cuda/lm_cuda.h" #include "core/Print.h" using lm::Print; #include <vector> #include <string> extern std::vector<int> cudaDevices; extern std::vector<int> numaNodes; extern bool mgpu_disablePeering; MultiGPUMapper::MultiGPUMapper(dim3 ldim, size_t cellsize, int apron=1, int overlap=0, int ngpus=0, int* devices=NULL, int pages=1) :lattice_dim(ldim), cellsize(cellsize), apron(apron), overlap(overlap), device_id(NULL), pagecount(pages) { if(! 
ngpus) { hipGetDeviceCount(&num_gpus); } else { num_gpus=ngpus; } device_id=new int[num_gpus]; pthread_key_create(&affinity,NULL); for(int g=0; g<num_gpus; g++) { Print::printf(Print::DEBUG, "[mgpu] device list %d: %d", g, devices[g]); device_id[g]=devices[g]; } descriptor=new SegmentDescriptor_s*[num_gpus]; lb_weights=new float[num_gpus]; lb_cost=new int[num_gpus]; device_memory=new size_t[num_gpus]; for(int i=0; i<num_gpus; i++) { //lm::CUDA::printCapabilities(device_id[i]); lb_weights[i]=1; lb_cost[i]=100; descriptor[i]=new SegmentDescriptor_s; device_memory[i]=lm::CUDA::getFreeMemory(device_id[i]); //Print::printf(Print::DEBUG, "[mgpu] Device %d free mem %llu\n", i, device_memory[i]); } // Uncomment for testing to artificially constrain memory // device_memory[0]=1024*1024*6; } MultiGPUMapper::~MultiGPUMapper() { if(device_id) delete [] device_id; pthread_key_delete(affinity); } int MultiGPUMapper::get_num_gpus() { return num_gpus; } bool MultiGPUMapper::use(int gpu) { if(gpu < 0 || gpu >= num_gpus) return false; hipError_t err=hipSetDevice(device_id[gpu]); return (err == hipSuccess); } int MultiGPUMapper::get_overlap() { return overlap; } int MultiGPUMapper::get_apron() { return apron; } void MultiGPUMapper::set_affinity(int gpu) { pthread_setspecific(affinity, reinterpret_cast<void *>(gpu)); } int MultiGPUMapper::get_affinity() { return (size_t)pthread_getspecific(affinity); } dim3 MultiGPUMapper::get_lattice_dim() { return lattice_dim; } SegmentDescriptor_s* MultiGPUMapper::getSegmentDescriptor(int gpu) { if(gpu < 0 || gpu >= num_gpus) return NULL; return descriptor[gpu]; } void MultiGPUMapper::build_descriptor(int gpu, dim3 ldim, int3 goffset, dim3 active, dim3 loffset) { SegmentDescriptor_s *seg=descriptor[gpu]; seg->local_dimensions=ldim; seg->global_offset=goffset; seg->active_dimensions=active; seg->active_offset=loffset; Print::printf(Print::DEBUG, "* Descriptor %d: " " local dim: %d x %d x %d\n" "* active dim: %d x %d x %d\n" "* global offset: %d x %d x %d\n" "* active offset: %d x %d x %d\n",gpu, ldim.x, ldim.y, ldim.z, active.x, active.y, active.z, goffset.x, goffset.y, goffset.z, loffset.x, loffset.y, loffset.z ); } bool MultiGPUMapper::enable_peer_access(int src, int dst) { if(dst < 0 || dst >= num_gpus) return false; if(mgpu_disablePeering) return false; bool is_peered=false; int local=device_id[src]; int peer=device_id[dst]; if(use(src)) { std::string msg; int can_access=0; hipDeviceCanAccessPeer(&can_access, local, peer); if(can_access) { switch(hipDeviceEnablePeerAccess(peer,0)) { case hipSuccess: case hipErrorPeerAccessAlreadyEnabled: msg="Peer access enabled"; hipGetLastError(); // clear out potential already-enabled error is_peered=true; break; default: msg="Peer access setup FAILED"; } } else { msg="NOTICE: peer access not available"; } Print::printf(Print::DEBUG, "%s from device %d to %d (logical %d->%d)", msg.c_str(), local, peer, src, dst); } return is_peered; } bool MultiGPUMapper::numa_bind_thread(int gpu) { #ifdef MPD_NUMA_SUPPORT if(gpu >= numaNodes.size()) return false; #ifdef DEBUG Print::printf(Print::DEBUG_VERBOSE, "Binding gpu thread %d to node %d", gpu, numaNodes[gpu]); #endif nodemask_t nm; nodemask_zero(&nm); nodemask_set(&nm, numaNodes[gpu]); numa_bind(&nm); return true; #else Print::printf(Print::DEBUG, "Built without -DMPD_NUMA_SUPPORT, cannot bind thread"); return false; #endif } void MultiGPUMapper::record_execution_cost(int gpu, int etime) { lb_cost[gpu]=etime; } bool MultiGPUMapper::rebalance() { if(!
determine_load_balance()) return false; printf("\n*** Rebalance *** \n"); initialize(); printf("***************** \n\n"); return true; } void MultiGPUMapper::initialize_gpu(int gpu) { if(! use(gpu)) throw "Failed to use gpu!"; } size_t MultiGPUMapper::get_global_size() { return DIMSIZE(lattice_dim) * cellsize; }
da1db010f289177594ba31f641440155425fb9c6.cu
/* * University of Illinois Open Source License * Copyright 2012-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Mike Hallock */ #include <cuda.h> #include <cuda_runtime.h> #include <string.h> #include <stdio.h> #include <math.h> #include "config.h" #include "SegmentDescriptor.h" #include "MultiGPUMapper.h" #include <pthread.h> #ifdef MPD_NUMA_SUPPORT #include <numa.h> #endif #include "cuda/lm_cuda.h" #include "core/Print.h" using lm::Print; #include <vector> #include <string> extern std::vector<int> cudaDevices; extern std::vector<int> numaNodes; extern bool mgpu_disablePeering; MultiGPUMapper::MultiGPUMapper(dim3 ldim, size_t cellsize, int apron=1, int overlap=0, int ngpus=0, int* devices=NULL, int pages=1) :lattice_dim(ldim), cellsize(cellsize), apron(apron), overlap(overlap), device_id(NULL), pagecount(pages) { if(! 
ngpus) { cudaGetDeviceCount(&num_gpus); } else { num_gpus=ngpus; } device_id=new int[num_gpus]; pthread_key_create(&affinity,NULL); for(int g=0; g<num_gpus; g++) { Print::printf(Print::DEBUG, "[mgpu] device list %d: %d", g, devices[g]); device_id[g]=devices[g]; } descriptor=new SegmentDescriptor_s*[num_gpus]; lb_weights=new float[num_gpus]; lb_cost=new int[num_gpus]; device_memory=new size_t[num_gpus]; for(int i=0; i<num_gpus; i++) { //lm::CUDA::printCapabilities(device_id[i]); lb_weights[i]=1; lb_cost[i]=100; descriptor[i]=new SegmentDescriptor_s; device_memory[i]=lm::CUDA::getFreeMemory(device_id[i]); //Print::printf(Print::DEBUG, "[mgpu] Device %d free mem %llu\n", i, device_memory[i]); } // Uncomment for testing to artificially constrain memory // device_memory[0]=1024*1024*6; } MultiGPUMapper::~MultiGPUMapper() { if(device_id) delete [] device_id; pthread_key_delete(affinity); } int MultiGPUMapper::get_num_gpus() { return num_gpus; } bool MultiGPUMapper::use(int gpu) { if(gpu < 0 || gpu >= num_gpus) return false; cudaError_t err=cudaSetDevice(device_id[gpu]); return (err == cudaSuccess); } int MultiGPUMapper::get_overlap() { return overlap; } int MultiGPUMapper::get_apron() { return apron; } void MultiGPUMapper::set_affinity(int gpu) { pthread_setspecific(affinity, reinterpret_cast<void *>(gpu)); } int MultiGPUMapper::get_affinity() { return (size_t)pthread_getspecific(affinity); } dim3 MultiGPUMapper::get_lattice_dim() { return lattice_dim; } SegmentDescriptor_s* MultiGPUMapper::getSegmentDescriptor(int gpu) { if(gpu < 0 || gpu >= num_gpus) return NULL; return descriptor[gpu]; } void MultiGPUMapper::build_descriptor(int gpu, dim3 ldim, int3 goffset, dim3 active, dim3 loffset) { SegmentDescriptor_s *seg=descriptor[gpu]; seg->local_dimensions=ldim; seg->global_offset=goffset; seg->active_dimensions=active; seg->active_offset=loffset; Print::printf(Print::DEBUG, "* Descriptor %d: " " local dim: %d x %d x %d\n" "* active dim: %d x %d x %d\n" "* global offset: %d x %d x %d\n" "* active offset: %d x %d x %d\n",gpu, ldim.x, ldim.y, ldim.z, active.x, active.y, active.z, goffset.x, goffset.y, goffset.z, loffset.x, loffset.y, loffset.z ); } bool MultiGPUMapper::enable_peer_access(int src, int dst) { if(dst < 0 || dst >= num_gpus) return false; if(mgpu_disablePeering) return false; bool is_peered=false; int local=device_id[src]; int peer=device_id[dst]; if(use(src)) { std::string msg; int can_access=0; cudaDeviceCanAccessPeer(&can_access, local, peer); if(can_access) { switch(cudaDeviceEnablePeerAccess(peer,0)) { case cudaSuccess: case cudaErrorPeerAccessAlreadyEnabled: msg="Peer access enabled"; cudaGetLastError(); // clear out potential already-enabled error is_peered=true; break; default: msg="Peer access setup FAILED"; } } else { msg="NOTICE: peer access not available"; } Print::printf(Print::DEBUG, "%s from device %d to %d (logical %d->%d)", msg.c_str(), local, peer, src, dst); } return is_peered; } bool MultiGPUMapper::numa_bind_thread(int gpu) { #ifdef MPD_NUMA_SUPPORT if(gpu >= numaNodes.size()) return false; #ifdef DEBUG Print::printf(Print::DEBUG_VERBOSE, "Binding gpu thread %d to node %d", gpu, numaNodes[gpu]); #endif nodemask_t nm; nodemask_zero(&nm); nodemask_set(&nm, numaNodes[gpu]); numa_bind(&nm); return true; #else Print::printf(Print::DEBUG, "Built without -DMPD_NUMA_SUPPORT, cannot bind thread"); return false; #endif } void MultiGPUMapper::record_execution_cost(int gpu, int etime) { lb_cost[gpu]=etime; } bool MultiGPUMapper::rebalance() { if(!
determine_load_balance()) return false; printf("\n*** Rebalance *** \n"); initialize(); printf("***************** \n\n"); return true; } void MultiGPUMapper::initialize_gpu(int gpu) { if(! use(gpu)) throw "Failed to use gpu!"; } size_t MultiGPUMapper::get_global_size() { return DIMSIZE(lattice_dim) * cellsize; }
eb7080c7b5cadc9e7346276dcd109292a27a7568.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <iostream> #include <assert.h> // #include <./inc/helper_cuda.h> //use on win 10 #include "helper_cuda.h" // use on linux //#include "Utilities.cuh" #include <chrono> using namespace std; using namespace std::chrono; #include <hip/hip_runtime.h> #include <cusparse_v2.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } #include <iostream> #include <fstream> #include <vector> #include "device_launch_parameters.h" /// add to color your code #include<string> #include "graph.h" //#include "../common/common.h" // haha #define mian main //#define ? ; //#define ?, ////#define ? (; //#define ?) #define ture true #define flase false #define D(x) cout<<#x<<"="<<x<<endl; double* SMVP(double* h_C_dense, int* nnz_return, int N); double* SMVP_saveMemory(double* h_C_dense, int* nnz_return, int N); hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /***************************/ /* CUSPARSE ERROR CHECKING */ /***************************/ static const char* _cusparseGetErrorEnum(hipsparseStatus_t error) { switch (error) { case HIPSPARSE_STATUS_SUCCESS: return "HIPSPARSE_STATUS_SUCCESS"; case HIPSPARSE_STATUS_NOT_INITIALIZED: return "HIPSPARSE_STATUS_NOT_INITIALIZED"; case HIPSPARSE_STATUS_ALLOC_FAILED: return "HIPSPARSE_STATUS_ALLOC_FAILED"; case HIPSPARSE_STATUS_INVALID_VALUE: return "HIPSPARSE_STATUS_INVALID_VALUE"; case HIPSPARSE_STATUS_ARCH_MISMATCH: return "HIPSPARSE_STATUS_ARCH_MISMATCH"; case HIPSPARSE_STATUS_MAPPING_ERROR: return "HIPSPARSE_STATUS_MAPPING_ERROR"; case HIPSPARSE_STATUS_EXECUTION_FAILED: return "HIPSPARSE_STATUS_EXECUTION_FAILED"; case HIPSPARSE_STATUS_INTERNAL_ERROR: return "HIPSPARSE_STATUS_INTERNAL_ERROR"; case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case HIPSPARSE_STATUS_ZERO_PIVOT: return "HIPSPARSE_STATUS_ZERO_PIVOT"; } return "<unknown>"; } inline void __cusparseSafeCall(hipsparseStatus_t err, const char* file, const int line) { if (HIPSPARSE_STATUS_SUCCESS != err) { fprintf(stderr, "CUSPARSE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \ _cusparseGetErrorEnum(err)); \ hipDeviceReset(); assert(0); \ } } extern "C" void cusparseSafeCall(hipsparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); } double* tester(int N, double ratio) { ////int** a;/* double* a = (double*)malloc(sizeof(double*) * N * N + 1); ////////////////////////// shouldn't this be N*N? //cout << sizeof(a) << " ->size of a"; int count = 0;// count how many 1s are generated at random srand(time(0)); // seed the random number generator int M = N; // row = col double PE = ratio / N; // initialize the array for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) { a[i * N + j] = rand() % 2; if (a[i * N + j] == 1) count++; } // randomly adjust entries to match the target ratio if (count > PE* M* N) { for (int n = count, i, j; n - PE * M * N > 0; ) { i = rand() % M; j = rand() % N; if (1 == a[i * N + j]) { a[i * N + j] = 0; n--; } } } else if (count < PE* M* N) { for (int n = count, i, j; PE * M * N - n > 0; ) { i = rand() % M; j = rand() % N; if (0 == a[i * N + j]) { a[i * N + j] = 1; n++; } } } return a; } void print_matrix(double* h_A_dense, int N) { for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) { //printf("%f \t", h_C_dense[i * N + j]); if (h_A_dense[j * N + i] != 0) { cout << 1 << " "; } else { cout << 0 << " "; } } printf("\n"); } printf("print_matrix done\n"); } /********/ /* MAIN */ /********/ int main(int args, char **argv) { std::cout << "Input: ./exe beg csr weight\n"; if (args != 5) { std::cout << "Wrong input\n"; return -1; } const char *beg_file = argv[1]; const char *csr_file = argv[2]; const char *weight_file = argv[3]; const int N =atoi(argv[4]); std::cout << "Normal 19!!! "; //template <file_vertex_t, file_index_t, file_weight_t //new_vertex_t, new_index_t, new_weight_t> graph<long, long, /*int*/ long, long, long, /* char*/ long> *ginst = new graph<long, long, /*int*/ long, long, long, /*char*/ long>(beg_file, csr_file, weight_file); std::cout << "\n"; std::cout << "New Start" << "\n"; // --- Initialize cuSPARSE hipsparseHandle_t handle; hipsparseCreate(&handle); /**************************/ /* SETTING UP THE PROBLEM */ /**************************/ // const int N = 16384; // --- Number of rows and columns // --- Host side dense matrices double* h_A_dense = (double*)malloc(N * N * sizeof(*h_A_dense)); // double* h_C_dense = (double*)malloc(N * N * sizeof(*h_C_dense)); double* h_C_dense; //// --- Column-major ordering //h_A_dense[0] = 0.4612; h_A_dense[4] = -0.0006; h_A_dense[8] = 0.3566; h_A_dense[12] = 0.0; //h_A_dense[1] = -0.0006; h_A_dense[5] = 0.4640; h_A_dense[9] = 0.0723; h_A_dense[13] = 0.0; //h_A_dense[2] = 0.3566; h_A_dense[6] = 0.0723; h_A_dense[10] = 0.7543; h_A_dense[14] = 0.0; //h_A_dense[3] = 0.; h_A_dense[7] = 0.0; h_A_dense[11] = 0.0; h_A_dense[15] = 0.1; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float arr1[16384]; float arr2[16384]; int i = 0; // string path = "delaunay_n14.txt"; // ifstream myfile(path);// delaunay_n14 toy // if (!myfile) { // cout << "Unable to open myfile"; // exit(1); // terminate with error // } // else // { // fstream f(path); // vector<string> words; // string line; // while (getline(f, line)) // { // words.push_back(line); // } //dictionary.txtcsdn4 // cout << "Num of edge:" << words.size() << endl; // /*for (int i = 0; i < words.size(); i++) // { cout << words[i] << endl; }*/ // char str[64] = { 0 }; for (int i = 0; i < N * N; i++) { h_A_dense[i] = 0; if (i % N == i / N) { h_A_dense[i] = 1; } } for (int i = 0; i < ginst->vert_count; i++) { int beg = ginst->beg_pos[i]; int end = ginst->beg_pos[i + 1]; //std::cout<<beg<<"beg "<<"\n"; //std::cout<<end<<"end "<<"\n"; // std::cout << i << " -> "; // std::cout<<i<<"'s outgoing money: "; for (int j = beg; j < end; j++) { // std::cout << ginst->csr[j] << " "; 
h_A_dense[ i *N + ginst->csr[j] ] = 1; } // std::cout << "\n"; // cout << "Normal at line 160!!! "; } cout<<ginst->vert_count<< " "<< ginst->beg_pos[ginst->vert_count]<<endl; // for (int i = 0; i < words.size(); i++) // { // myfile.getline(str, 64); // //cout << words[i] << "\n"; // sscanf(str, "%f %f", &arr1[i], &arr2[i]); // cout <<i<<" : " << arr1[i] * N << " , " << arr2[i] << " is 1\n"; // h_A_dense[(int)((arr1[i]) * N + arr2[i])] = 1; ///(arr1[i]-1) don't use it ! // } // } //print_matrix(h_A_dense, N); // h_B_dense = h_A_dense; int nnz_return = 0; hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for (int i = 0; i < N-1; i++) { int temp = nnz_return; h_A_dense = SMVP_saveMemory(h_A_dense, &nnz_return, N); cout << "-------------------nnz= " << nnz_return<<" "<< i << " round\n"; hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); std::cout << "CUDA spent " << elapsedTime << " ms"<<std::endl; if (temp == nnz_return) { cout << "finish after "<< i<< " round"; hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); std::cout << "CUDA spent " << elapsedTime << " ms"<<std::endl; exit(0); // terminate with error } } //cout << "finish after " << N-1 << " round"; hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); std::cout << "CUDA spent " << elapsedTime << std::endl; exit(0); double* h_B_dense;// debug-test // --- Create device arrays and copy host arrays to them double* d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); double* d_B_dense; gpuErrchk(hipMalloc(&d_B_dense, N * N * sizeof(*d_B_dense))); double* d_C_dense; gpuErrchk(hipMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(hipMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_B_dense, h_B_dense, N * N * sizeof(*d_B_dense), hipMemcpyHostToDevice)); // --- Descriptor for sparse matrix A hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); cusparseSafeCall(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix B hipsparseMatDescr_t descrB; cusparseSafeCall(hipsparseCreateMatDescr(&descrB)); cusparseSafeCall(hipsparseSetMatType(descrB, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrB, HIPSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C hipsparseMatDescr_t descrC; cusparseSafeCall(hipsparseCreateMatDescr(&descrC)); cusparseSafeCall(hipsparseSetMatType(descrC, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrC, HIPSPARSE_INDEX_BASE_ONE)); int nnzA = 0; // --- Number of nonzero elements in dense matrix A int nnzB = 0; // --- Number of nonzero elements in dense matrix B const int lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A int* d_nnzPerVectorA; gpuErrchk(hipMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Device side number of nonzero elements per row of matrix B int* d_nnzPerVectorB; gpuErrchk(hipMalloc(&d_nnzPerVectorB, N * sizeof(*d_nnzPerVectorB))); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, N, N, descrB, 
d_B_dense, lda, d_nnzPerVectorB, &nnzB)); // --- Host side number of nonzero elements per row of matrix A int* h_nnzPerVectorA = (int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(hipMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), hipMemcpyDeviceToHost)); // --- Host side number of nonzero elements per row of matrix B int* h_nnzPerVectorB = (int*)malloc(N * sizeof(*h_nnzPerVectorB)); gpuErrchk(hipMemcpy(h_nnzPerVectorB, d_nnzPerVectorB, N * sizeof(*h_nnzPerVectorB), hipMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix double* d_A; gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); double* d_B; gpuErrchk(hipMalloc(&d_B, nnzB * sizeof(*d_B))); int* d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); int* d_B_RowIndices; gpuErrchk(hipMalloc(&d_B_RowIndices, (N + 1) * sizeof(*d_B_RowIndices))); int* d_C_RowIndices; gpuErrchk(hipMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); int* d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); int* d_B_ColIndices; gpuErrchk(hipMalloc(&d_B_ColIndices, nnzB * sizeof(*d_B_ColIndices))); cusparseSafeCall(hipsparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); cusparseSafeCall(hipsparseDdense2csr(handle, N, N, descrB, d_B_dense, lda, d_nnzPerVectorB, d_B, d_B_RowIndices, d_B_ColIndices)); // --- Host side sparse matrices double* h_A = (double*)malloc(nnzA * sizeof(*h_A)); double* h_B = (double*)malloc(nnzB * sizeof(*h_B)); int* h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); int* h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); int* h_B_RowIndices = (int*)malloc((N + 1) * sizeof(*h_B_RowIndices)); int* h_B_ColIndices = (int*)malloc(nnzB * sizeof(*h_B_ColIndices)); int* h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); gpuErrchk(hipMemcpy(h_A, d_A, nnzA * sizeof(*h_A), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_B, d_B, nnzB * sizeof(*h_B), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_B_RowIndices, d_B_RowIndices, (N + 1) * sizeof(*h_B_RowIndices), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_B_ColIndices, d_B_ColIndices, nnzB * sizeof(*h_B_ColIndices), hipMemcpyDeviceToHost)); // --- Performing the matrix - matrix multiplication int baseC, nnzC = 0; // nnzTotalDevHostPtr points to host memory int* nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(hipsparseXcsrgemmNnz(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(hipMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), hipMemcpyDeviceToHost)); 
gpuErrchk(hipMemcpy(&baseC, d_C_RowIndices, sizeof(int), hipMemcpyDeviceToHost)); nnzC -= baseC; } int* d_C_ColIndices; gpuErrchk(hipMalloc(&d_C_ColIndices, nnzC * sizeof(int))); double* d_C; gpuErrchk(hipMalloc(&d_C, nnzC * sizeof(double))); double* h_C = (double*)malloc(nnzC * sizeof(*h_C)); int* h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(hipsparseDcsrgemm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(hipsparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); //gpuErrchk(hipMemcpy(h_C, d_C, nnzC * sizeof(*h_C), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * sizeof(*h_C_ColIndices), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), hipMemcpyDeviceToHost)); print_matrix(h_C_dense, N); //gpuErrchk(hipDeviceSynchronize()); //gpuErrchk(hipMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), hipMemcpyDeviceToHost)); //printf("before h_C_RowIndices\n"); //for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); ///////////////////////// has bug ///////////////////////////again ///////////////////////// matrix C give value to matrix A (CSR format) //d_A = d_C; d_A_RowIndices = d_C_RowIndices; d_A_ColIndices = d_C_ColIndices; //nnzA = nnzC; //gpuErrchk(hipDeviceSynchronize()); //gpuErrchk(hipMemcpy(h_C_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices), hipMemcpyDeviceToHost)); //printf("A copy C \n"); //for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); ////gpuErrchk(hipMemcpyToSymbol(d_A, d_C, nnzC * sizeof(double))); ////hipMemcpyToSymbol(d_A_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices)); ////hipMemcpyToSymbol(d_A_ColIndices, d_C_ColIndices, nnzC * sizeof(int)); ////// --- Performing the matrix - matrix multiplication ////baseC, nnzC = 0; ////// nnzTotalDevHostPtr points to host memory ////nnzTotalDevHostPtr = &nnzC; //cusparseSafeCall(hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST)); //hipFree(d_C_RowIndices); ///*int* d_C_RowIndices; */ gpuErrchk(hipMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); //cusparseSafeCall(hipsparseXcsrgemmNnz(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, // N, N, N, // descrA, nnzA, d_A_RowIndices, d_A_ColIndices, // descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, // d_C_RowIndices, nnzTotalDevHostPtr)); //if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; //else { // gpuErrchk(hipMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), hipMemcpyDeviceToHost)); // gpuErrchk(hipMemcpy(&baseC, d_C_RowIndices, sizeof(int), hipMemcpyDeviceToHost)); // nnzC -= baseC; //} //cusparseSafeCall(hipsparseDcsrgemm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, // N, N, N, // descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, // descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, // d_C, d_C_RowIndices, d_C_ColIndices)); //gpuErrchk(hipDeviceSynchronize()); //gpuErrchk(hipMemcpy(h_C_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices), 
hipMemcpyDeviceToHost)); //printf("result h_C_RowIndices \n"); //for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); ////cusparseSafeCall(hipsparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); //cusparseSafeCall(hipsparseDcsr2dense(handle, N, N, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, N)); //gpuErrchk(hipMemcpy(h_C_dense, d_A_dense, N * N * sizeof(double), hipMemcpyDeviceToHost)); //print_matrix(h_C_dense, N); h_A_dense = h_C_dense; cout << "new A: \n"; print_matrix(h_A_dense,N); // --- Create device arrays and copy host arrays to them /* double* d_A_dense; */ gpuErrchk(hipMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); /* double* d_C_dense; */ gpuErrchk(hipMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(hipMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), hipMemcpyHostToDevice)); // --- Descriptor for sparse matrix A cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); cusparseSafeCall(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C cusparseSafeCall(hipsparseCreateMatDescr(&descrC)); cusparseSafeCall(hipsparseSetMatType(descrC, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrC, HIPSPARSE_INDEX_BASE_ONE)); nnzA = 0; // --- Number of nonzero elements in dense matrix A //lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A gpuErrchk(hipMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Host side number of nonzero elements per row of matrix A h_nnzPerVectorA = (int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(hipMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), hipMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); gpuErrchk(hipMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); gpuErrchk(hipMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); cusparseSafeCall(hipsparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); // --- Host side sparse matrices h_A = (double*)malloc(nnzA * sizeof(*h_A)); h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); gpuErrchk(hipMemcpy(h_A, d_A, nnzA * sizeof(*h_A), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost)); // --- Performing the matrix - matrix multiplication baseC, nnzC = 0; // nnzTotalDevHostPtr points to host memory 
nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(hipsparseXcsrgemmNnz(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(hipMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&baseC, d_C_RowIndices, sizeof(int), hipMemcpyDeviceToHost)); nnzC -= baseC; } gpuErrchk(hipMalloc(&d_C_ColIndices, nnzC * sizeof(int))); gpuErrchk(hipMalloc(&d_C, nnzC * sizeof(double))); h_C = (double*)malloc(nnzC * sizeof(*h_C)); h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(hipsparseDcsrgemm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(hipsparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); //gpuErrchk(hipMemcpy(h_C, d_C, nnzC * sizeof(*h_C), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * sizeof(*h_C_ColIndices), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), hipMemcpyDeviceToHost)); print_matrix(h_C_dense, N); cout << "new C: \n"; cout << "num of nnzC " << nnzC; cout << "use function\n\n "; nnz_return = 0; h_A_dense = SMVP(h_A_dense, &nnz_return, N); h_A_dense = SMVP(h_A_dense, &nnz_return, N); h_A_dense = SMVP(h_A_dense, &nnz_return, N); h_A_dense = SMVP(h_A_dense, &nnz_return, N); return 0; return 0; } double* SMVP(double* h_C_dense, int *nnz_return, int N) { double* h_A_dense = h_C_dense; //debug /*cout << "input A: \n"; print_matrix(h_A_dense, N);*/ hipsparseHandle_t handle; hipsparseCreate(&handle); // --- Create device arrays and copy host arrays to them double* d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); double* d_C_dense; gpuErrchk(hipMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(hipMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), hipMemcpyHostToDevice)); // --- Descriptor for sparse matrix A hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); cusparseSafeCall(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C hipsparseMatDescr_t descrC; cusparseSafeCall(hipsparseCreateMatDescr(&descrC)); cusparseSafeCall(hipsparseSetMatType(descrC, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrC, HIPSPARSE_INDEX_BASE_ONE)); int nnzA = 0; // --- Number of nonzero elements in dense matrix A // --- Number of nonzero elements in dense matrix B int lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A int* d_nnzPerVectorA; gpuErrchk(hipMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Host side number of nonzero elements per row of matrix A int* h_nnzPerVectorA = 
(int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(hipMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), hipMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix double* d_A; gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); int* d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); int* d_C_RowIndices; gpuErrchk(hipMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); int* d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); cusparseSafeCall(hipsparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); // --- Host side sparse matrices //double* h_A = (double*)malloc(nnzA * sizeof(*h_A)); //int* h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); //int* h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); //int* h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); //gpuErrchk(hipMemcpy(h_A, d_A, nnzA * sizeof(*h_A), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost)); // --- Performing the matrix - matrix multiplication int baseC, nnzC = 0; // nnzTotalDevHostPtr points to host memory int* nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(hipsparseXcsrgemmNnz(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(hipMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&baseC, d_C_RowIndices, sizeof(int), hipMemcpyDeviceToHost)); nnzC -= baseC; } int* d_C_ColIndices; gpuErrchk(hipMalloc(&d_C_ColIndices, nnzC * sizeof(int))); double* d_C; gpuErrchk(hipMalloc(&d_C, nnzC * sizeof(double))); double* h_C = (double*)malloc(nnzC * sizeof(*h_C)); int* h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(hipsparseDcsrgemm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(hipsparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); hipFree(descrC); hipFree(d_C); hipFree(d_C_RowIndices); hipFree(d_C_ColIndices); hipFree(descrA); hipFree(d_A); hipFree(d_A_RowIndices); hipFree(d_A_ColIndices); //gpuErrchk(hipMemcpy(h_C, d_C, nnzC * sizeof(*h_C), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * sizeof(*h_C_ColIndices), hipMemcpyDeviceToHost)); 
gpuErrchk(hipMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), hipMemcpyDeviceToHost)); hipFree(d_C_dense); /*cout << "output \n"; print_matrix(h_C_dense, N);*/ cout << "nnzC " << nnzC<<"\n"; *nnz_return = nnzC; return h_C_dense; } double* SMVP_saveMemory(double* h_C_dense, int* nnz_return, int N) { double* h_A_dense = h_C_dense; //debug /*cout << "input A: \n"; print_matrix(h_A_dense, N);*/ hipsparseHandle_t handle; hipsparseCreate(&handle); // --- Create device arrays and copy host arrays to them double* d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); double* d_C_dense; gpuErrchk(hipMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(hipMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), hipMemcpyHostToDevice)); // --- Descriptor for sparse matrix A hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); cusparseSafeCall(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C hipsparseMatDescr_t descrC; cusparseSafeCall(hipsparseCreateMatDescr(&descrC)); cusparseSafeCall(hipsparseSetMatType(descrC, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrC, HIPSPARSE_INDEX_BASE_ONE)); int nnzA = 0; // --- Number of nonzero elements in dense matrix A // --- Number of nonzero elements in dense matrix B int lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A int* d_nnzPerVectorA; gpuErrchk(hipMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Host side number of nonzero elements per row of matrix A int* h_nnzPerVectorA = (int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(hipMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), hipMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix double* d_A; gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); int* d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); int* d_C_RowIndices; gpuErrchk(hipMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); int* d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); cusparseSafeCall(hipsparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); // --- Host side sparse matrices //double* h_A = (double*)malloc(nnzA * sizeof(*h_A)); //int* h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); //int* h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); //int* h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); //gpuErrchk(hipMemcpy(h_A, d_A, nnzA * sizeof(*h_A), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost)); // --- Performing the 
matrix - matrix multiplication int baseC, nnzC = 0; // nnzTotalDevHostPtr points to host memory int* nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(hipsparseXcsrgemmNnz(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(hipMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&baseC, d_C_RowIndices, sizeof(int), hipMemcpyDeviceToHost)); nnzC -= baseC; } int* d_C_ColIndices; gpuErrchk(hipMalloc(&d_C_ColIndices, nnzC * sizeof(int))); double* d_C; gpuErrchk(hipMalloc(&d_C, nnzC * sizeof(double))); double* h_C = (double*)malloc(nnzC * sizeof(*h_C)); int* h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(hipsparseDcsrgemm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(hipsparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); hipFree(descrC); hipFree(d_C); hipFree(d_C_RowIndices); hipFree(d_C_ColIndices); hipFree(descrA); hipFree(d_A); hipFree(d_A_RowIndices); hipFree(d_A_ColIndices); //gpuErrchk(hipMemcpy(h_C, d_C, nnzC * sizeof(*h_C), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), hipMemcpyDeviceToHost)); //gpuErrchk(hipMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * sizeof(*h_C_ColIndices), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), hipMemcpyDeviceToHost)); hipFree(d_C_dense); hipFree(d_A_dense); hipFree(d_nnzPerVectorA); free(h_nnzPerVectorA); /*cout << "output \n"; print_matrix(h_C_dense, N);*/ cout << "nnzC " << nnzC << "\n"; *nnz_return = nnzC; return h_C_dense; }
eb7080c7b5cadc9e7346276dcd109292a27a7568.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <assert.h> // #include <./inc/helper_cuda.h> //use on win 10 #include "helper_cuda.h" // use on linux //#include "Utilities.cuh" #include <chrono> using namespace std; using namespace std::chrono; #include <cuda_runtime.h> #include <cusparse_v2.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } #include <iostream> #include <fstream> #include <vector> #include "device_launch_parameters.h" /// add to color your code #include<string> #include "graph.h" //#include "../common/common.h" // haha #define mian main //#define ? ; //#define ?, ////#define ? (; //#define ?) #define ture true #define flase false #define D(x) cout<<#x<<"="<<x<<endl; double* SMVP(double* h_C_dense, int* nnz_return, int N); double* SMVP_saveMemory(double* h_C_dense, int* nnz_return, int N); cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /***************************/ /* CUSPARSE ERROR CHECKING */ /***************************/ static const char* _cusparseGetErrorEnum(cusparseStatus_t error) { switch (error) { case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_STATUS_SUCCESS"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case CUSPARSE_STATUS_ZERO_PIVOT: return "CUSPARSE_STATUS_ZERO_PIVOT"; } return "<unknown>"; } inline void __cusparseSafeCall(cusparseStatus_t err, const char* file, const int line) { if (CUSPARSE_STATUS_SUCCESS != err) { fprintf(stderr, "CUSPARSE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \ _cusparseGetErrorEnum(err)); \ cudaDeviceReset(); assert(0); \ } } extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); } double* tester(int N, double ratio) { ////int** a;/* double* a = (double*)malloc(sizeof(double*) * N * N + 1); ////////////////////////// shouldn't this be N*N? //cout << sizeof(a) << " ->size of a"; int count = 0;// count how many 1s are generated at random srand(time(0)); // seed the random number generator int M = N; // row = col double PE = ratio / N; // initialize the array for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) { a[i * N + j] = rand() % 2; if (a[i * N + j] == 1) count++; } // randomly adjust entries to match the target ratio if (count > PE* M* N) { for (int n = count, i, j; n - PE * M * N > 0; ) { i = rand() % M; j = rand() % N; if (1 == a[i * N + j]) { a[i * N + j] = 0; n--; } } } else if (count < PE* M* N) { for (int n = count, i, j; PE * M * N - n > 0; ) { i = rand() % M; j = rand() % N; if (0 == a[i * N + j]) { a[i * N + j] = 1; n++; } } } return a; } void print_matrix(double* h_A_dense, int N) { for (int j = 0; j < N; j++) { for (int i = 0; i < N; i++) { //printf("%f \t",
h_C_dense[i * N + j]); if (h_A_dense[j * N + i] != 0) { cout << 1 << " "; } else { cout << 0 << " "; } } printf("\n"); } printf("print_matrix done\n"); } /********/ /* MAIN */ /********/ int main(int args, char **argv) { std::cout << "Input: ./exe beg csr weight\n"; if (args != 5) { std::cout << "Wrong input\n"; return -1; } const char *beg_file = argv[1]; const char *csr_file = argv[2]; const char *weight_file = argv[3]; const int N =atoi(argv[4]); std::cout << "Normal 19!!! "; //template <file_vertex_t, file_index_t, file_weight_t //new_vertex_t, new_index_t, new_weight_t> graph<long, long, /*int*/ long, long, long, /* char*/ long> *ginst = new graph<long, long, /*int*/ long, long, long, /*char*/ long>(beg_file, csr_file, weight_file); std::cout << "\n"; std::cout << "New Start" << "\n"; // --- Initialize cuSPARSE cusparseHandle_t handle; cusparseCreate(&handle); /**************************/ /* SETTING UP THE PROBLEM */ /**************************/ // const int N = 16384; // --- Number of rows and columns // --- Host side dense matrices double* h_A_dense = (double*)malloc(N * N * sizeof(*h_A_dense)); // double* h_C_dense = (double*)malloc(N * N * sizeof(*h_C_dense)); double* h_C_dense; //// --- Column-major ordering //h_A_dense[0] = 0.4612; h_A_dense[4] = -0.0006; h_A_dense[8] = 0.3566; h_A_dense[12] = 0.0; //h_A_dense[1] = -0.0006; h_A_dense[5] = 0.4640; h_A_dense[9] = 0.0723; h_A_dense[13] = 0.0; //h_A_dense[2] = 0.3566; h_A_dense[6] = 0.0723; h_A_dense[10] = 0.7543; h_A_dense[14] = 0.0; //h_A_dense[3] = 0.; h_A_dense[7] = 0.0; h_A_dense[11] = 0.0; h_A_dense[15] = 0.1; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float arr1[16384]; float arr2[16384]; int i = 0; // string path = "delaunay_n14.txt"; // ifstream myfile(path);// delaunay_n14 toy // if (!myfile) { // cout << "Unable to open myfile"; // exit(1); // terminate with error // } // else // { // fstream f(path); // vector<string> words; // string line; // while (getline(f, line)) // { // words.push_back(line); // } //dictionary.txt在csdn里面可以下载,里面有4万多个单词,相当于一个字典 // cout << "Num of edge:" << words.size() << endl; // /*for (int i = 0; i < words.size(); i++) // { cout << words[i] << endl; }*/ // char str[64] = { 0 }; for (int i = 0; i < N * N; i++) { h_A_dense[i] = 0; if (i % N == i / N) { h_A_dense[i] = 1; } } for (int i = 0; i < ginst->vert_count; i++) { int beg = ginst->beg_pos[i]; int end = ginst->beg_pos[i + 1]; //std::cout<<beg<<"beg "<<"\n"; //std::cout<<end<<"end "<<"\n"; // std::cout << i << " -> "; // std::cout<<i<<"'s outgoing money: "; for (int j = beg; j < end; j++) { // std::cout << ginst->csr[j] << " "; h_A_dense[ i *N + 
ginst->csr[j] ] = 1; } // std::cout << "\n"; // cout << "Normal at line 160!!! "; } cout<<ginst->vert_count<< " "<< ginst->beg_pos[ginst->vert_count]<<endl; // for (int i = 0; i < words.size(); i++) // { // myfile.getline(str, 64); // //cout << words[i] << "\n"; // sscanf(str, "%f %f", &arr1[i], &arr2[i]); // cout <<i<<" : " << arr1[i] * N << " , " << arr2[i] << " is 1\n"; // h_A_dense[(int)((arr1[i]) * N + arr2[i])] = 1; ///(arr1[i]-1) don't use it ! // } // } //print_matrix(h_A_dense, N); // h_B_dense = h_A_dense; int nnz_return = 0; cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for (int i = 0; i < N-1; i++) { int temp = nnz_return; h_A_dense = SMVP_saveMemory(h_A_dense, &nnz_return, N); cout << "-------------------nnz= " << nnz_return<<" "<< i << " round\n"; cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); std::cout << "CUDA spent: " << elapsedTime << " ms"<<std::endl; if (temp == nnz_return) { cout << "finish after "<< i<< " round"; cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); std::cout << "CUDA spent: " << elapsedTime << " ms"<<std::endl; exit(0); // terminate with error } } //cout << "finish after " << N-1 << " round"; cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); std::cout << "CUDA spent: " << elapsedTime << std::endl; exit(0); double* h_B_dense;// debug-test // --- Create device arrays and copy host arrays to them double* d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); double* d_B_dense; gpuErrchk(cudaMalloc(&d_B_dense, N * N * sizeof(*d_B_dense))); double* d_C_dense; gpuErrchk(cudaMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_B_dense, h_B_dense, N * N * sizeof(*d_B_dense), cudaMemcpyHostToDevice)); // --- Descriptor for sparse matrix A cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSafeCall(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix B cusparseMatDescr_t descrB; cusparseSafeCall(cusparseCreateMatDescr(&descrB)); cusparseSafeCall(cusparseSetMatType(descrB, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrB, CUSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C cusparseMatDescr_t descrC; cusparseSafeCall(cusparseCreateMatDescr(&descrC)); cusparseSafeCall(cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrC, CUSPARSE_INDEX_BASE_ONE)); int nnzA = 0; // --- Number of nonzero elements in dense matrix A int nnzB = 0; // --- Number of nonzero elements in dense matrix B const int lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A int* d_nnzPerVectorA; gpuErrchk(cudaMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Device side number of nonzero elements per row of matrix B int* d_nnzPerVectorB; gpuErrchk(cudaMalloc(&d_nnzPerVectorB, N * sizeof(*d_nnzPerVectorB))); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, N, N, descrB, d_B_dense, lda, 
d_nnzPerVectorB, &nnzB)); // --- Host side number of nonzero elements per row of matrix A int* h_nnzPerVectorA = (int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(cudaMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), cudaMemcpyDeviceToHost)); // --- Host side number of nonzero elements per row of matrix B int* h_nnzPerVectorB = (int*)malloc(N * sizeof(*h_nnzPerVectorB)); gpuErrchk(cudaMemcpy(h_nnzPerVectorB, d_nnzPerVectorB, N * sizeof(*h_nnzPerVectorB), cudaMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix double* d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); double* d_B; gpuErrchk(cudaMalloc(&d_B, nnzB * sizeof(*d_B))); int* d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); int* d_B_RowIndices; gpuErrchk(cudaMalloc(&d_B_RowIndices, (N + 1) * sizeof(*d_B_RowIndices))); int* d_C_RowIndices; gpuErrchk(cudaMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); int* d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); int* d_B_ColIndices; gpuErrchk(cudaMalloc(&d_B_ColIndices, nnzB * sizeof(*d_B_ColIndices))); cusparseSafeCall(cusparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); cusparseSafeCall(cusparseDdense2csr(handle, N, N, descrB, d_B_dense, lda, d_nnzPerVectorB, d_B, d_B_RowIndices, d_B_ColIndices)); // --- Host side sparse matrices double* h_A = (double*)malloc(nnzA * sizeof(*h_A)); double* h_B = (double*)malloc(nnzB * sizeof(*h_B)); int* h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); int* h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); int* h_B_RowIndices = (int*)malloc((N + 1) * sizeof(*h_B_RowIndices)); int* h_B_ColIndices = (int*)malloc(nnzB * sizeof(*h_B_ColIndices)); int* h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); gpuErrchk(cudaMemcpy(h_A, d_A, nnzA * sizeof(*h_A), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_B, d_B, nnzB * sizeof(*h_B), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_B_RowIndices, d_B_RowIndices, (N + 1) * sizeof(*h_B_RowIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_B_ColIndices, d_B_ColIndices, nnzB * sizeof(*h_B_ColIndices), cudaMemcpyDeviceToHost)); // --- Performing the matrix - matrix multiplication int baseC, nnzC = 0; // nnzTotalDevHostPtr points to host memory int* nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(cusparseXcsrgemmNnz(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(cudaMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), cudaMemcpyDeviceToHost)); 
gpuErrchk(cudaMemcpy(&baseC, d_C_RowIndices, sizeof(int), cudaMemcpyDeviceToHost)); nnzC -= baseC; } int* d_C_ColIndices; gpuErrchk(cudaMalloc(&d_C_ColIndices, nnzC * sizeof(int))); double* d_C; gpuErrchk(cudaMalloc(&d_C, nnzC * sizeof(double))); double* h_C = (double*)malloc(nnzC * sizeof(*h_C)); int* h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(cusparseDcsrgemm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(cusparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); //gpuErrchk(cudaMemcpy(h_C, d_C, nnzC * sizeof(*h_C), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * sizeof(*h_C_ColIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), cudaMemcpyDeviceToHost)); print_matrix(h_C_dense, N); //gpuErrchk(cudaDeviceSynchronize()); //gpuErrchk(cudaMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), cudaMemcpyDeviceToHost)); //printf("before h_C_RowIndices\n"); //for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); ///////////////////////// has bug ///////////////////////////again ///////////////////////// matrix C give value to matrix A (CSR format) //d_A = d_C; d_A_RowIndices = d_C_RowIndices; d_A_ColIndices = d_C_ColIndices; //nnzA = nnzC; //gpuErrchk(cudaDeviceSynchronize()); //gpuErrchk(cudaMemcpy(h_C_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices), cudaMemcpyDeviceToHost)); //printf("A copy C \n"); //for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); ////gpuErrchk(cudaMemcpyToSymbol(d_A, d_C, nnzC * sizeof(double))); ////cudaMemcpyToSymbol(d_A_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices)); ////cudaMemcpyToSymbol(d_A_ColIndices, d_C_ColIndices, nnzC * sizeof(int)); ////// --- Performing the matrix - matrix multiplication ////baseC, nnzC = 0; ////// nnzTotalDevHostPtr points to host memory ////nnzTotalDevHostPtr = &nnzC; //cusparseSafeCall(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST)); //cudaFree(d_C_RowIndices); ///*int* d_C_RowIndices; */ gpuErrchk(cudaMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); //cusparseSafeCall(cusparseXcsrgemmNnz(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, // N, N, N, // descrA, nnzA, d_A_RowIndices, d_A_ColIndices, // descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, // d_C_RowIndices, nnzTotalDevHostPtr)); //if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; //else { // gpuErrchk(cudaMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), cudaMemcpyDeviceToHost)); // gpuErrchk(cudaMemcpy(&baseC, d_C_RowIndices, sizeof(int), cudaMemcpyDeviceToHost)); // nnzC -= baseC; //} //cusparseSafeCall(cusparseDcsrgemm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, // N, N, N, // descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, // descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, // d_C, d_C_RowIndices, d_C_ColIndices)); //gpuErrchk(cudaDeviceSynchronize()); //gpuErrchk(cudaMemcpy(h_C_RowIndices, d_A_RowIndices, (N + 1) * 
sizeof(*d_A_RowIndices), cudaMemcpyDeviceToHost)); //printf("result h_C_RowIndices \n"); //for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); ////cusparseSafeCall(cusparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); //cusparseSafeCall(cusparseDcsr2dense(handle, N, N, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, N)); //gpuErrchk(cudaMemcpy(h_C_dense, d_A_dense, N * N * sizeof(double), cudaMemcpyDeviceToHost)); //print_matrix(h_C_dense, N); h_A_dense = h_C_dense; cout << "new A: \n"; print_matrix(h_A_dense,N); // --- Create device arrays and copy host arrays to them /* double* d_A_dense; */ gpuErrchk(cudaMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); /* double* d_C_dense; */ gpuErrchk(cudaMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), cudaMemcpyHostToDevice)); // --- Descriptor for sparse matrix A cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSafeCall(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C cusparseSafeCall(cusparseCreateMatDescr(&descrC)); cusparseSafeCall(cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrC, CUSPARSE_INDEX_BASE_ONE)); nnzA = 0; // --- Number of nonzero elements in dense matrix A //lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A gpuErrchk(cudaMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Host side number of nonzero elements per row of matrix A h_nnzPerVectorA = (int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(cudaMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), cudaMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); gpuErrchk(cudaMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); gpuErrchk(cudaMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); cusparseSafeCall(cusparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); // --- Host side sparse matrices h_A = (double*)malloc(nnzA * sizeof(*h_A)); h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); gpuErrchk(cudaMemcpy(h_A, d_A, nnzA * sizeof(*h_A), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost)); // --- Performing the matrix - matrix multiplication baseC, nnzC = 0; // 
nnzTotalDevHostPtr points to host memory nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(cusparseXcsrgemmNnz(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(cudaMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&baseC, d_C_RowIndices, sizeof(int), cudaMemcpyDeviceToHost)); nnzC -= baseC; } gpuErrchk(cudaMalloc(&d_C_ColIndices, nnzC * sizeof(int))); gpuErrchk(cudaMalloc(&d_C, nnzC * sizeof(double))); h_C = (double*)malloc(nnzC * sizeof(*h_C)); h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(cusparseDcsrgemm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(cusparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); //gpuErrchk(cudaMemcpy(h_C, d_C, nnzC * sizeof(*h_C), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * sizeof(*h_C_ColIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), cudaMemcpyDeviceToHost)); print_matrix(h_C_dense, N); cout << "new C: \n"; cout << "num of nnzC " << nnzC; cout << "use function\n\n "; nnz_return = 0; h_A_dense = SMVP(h_A_dense, &nnz_return, N); h_A_dense = SMVP(h_A_dense, &nnz_return, N); h_A_dense = SMVP(h_A_dense, &nnz_return, N); h_A_dense = SMVP(h_A_dense, &nnz_return, N); return 0; return 0; } double* SMVP(double* h_C_dense, int *nnz_return, int N) { double* h_A_dense = h_C_dense; //debug /*cout << "input A: \n"; print_matrix(h_A_dense, N);*/ cusparseHandle_t handle; cusparseCreate(&handle); // --- Create device arrays and copy host arrays to them double* d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); double* d_C_dense; gpuErrchk(cudaMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), cudaMemcpyHostToDevice)); // --- Descriptor for sparse matrix A cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSafeCall(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C cusparseMatDescr_t descrC; cusparseSafeCall(cusparseCreateMatDescr(&descrC)); cusparseSafeCall(cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrC, CUSPARSE_INDEX_BASE_ONE)); int nnzA = 0; // --- Number of nonzero elements in dense matrix A // --- Number of nonzero elements in dense matrix B int lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A int* d_nnzPerVectorA; gpuErrchk(cudaMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Host side number of nonzero elements per row of 
matrix A int* h_nnzPerVectorA = (int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(cudaMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), cudaMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix double* d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); int* d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); int* d_C_RowIndices; gpuErrchk(cudaMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); int* d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); cusparseSafeCall(cusparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); // --- Host side sparse matrices //double* h_A = (double*)malloc(nnzA * sizeof(*h_A)); //int* h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); //int* h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); //int* h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); //gpuErrchk(cudaMemcpy(h_A, d_A, nnzA * sizeof(*h_A), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost)); // --- Performing the matrix - matrix multiplication int baseC, nnzC = 0; // nnzTotalDevHostPtr points to host memory int* nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(cusparseXcsrgemmNnz(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(cudaMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&baseC, d_C_RowIndices, sizeof(int), cudaMemcpyDeviceToHost)); nnzC -= baseC; } int* d_C_ColIndices; gpuErrchk(cudaMalloc(&d_C_ColIndices, nnzC * sizeof(int))); double* d_C; gpuErrchk(cudaMalloc(&d_C, nnzC * sizeof(double))); double* h_C = (double*)malloc(nnzC * sizeof(*h_C)); int* h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(cusparseDcsrgemm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(cusparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); cudaFree(descrC); cudaFree(d_C); cudaFree(d_C_RowIndices); cudaFree(d_C_ColIndices); cudaFree(descrA); cudaFree(d_A); cudaFree(d_A_RowIndices); cudaFree(d_A_ColIndices); //gpuErrchk(cudaMemcpy(h_C, d_C, nnzC * sizeof(*h_C), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * 
sizeof(*h_C_ColIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), cudaMemcpyDeviceToHost)); cudaFree(d_C_dense); /*cout << "output \n"; print_matrix(h_C_dense, N);*/ cout << "nnzC " << nnzC<<"\n"; *nnz_return = nnzC; return h_C_dense; } double* SMVP_saveMemory(double* h_C_dense, int* nnz_return, int N) { double* h_A_dense = h_C_dense; //debug /*cout << "input A: \n"; print_matrix(h_A_dense, N);*/ cusparseHandle_t handle; cusparseCreate(&handle); // --- Create device arrays and copy host arrays to them double* d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, N * N * sizeof(*d_A_dense))); double* d_C_dense; gpuErrchk(cudaMalloc(&d_C_dense, N * N * sizeof(*d_C_dense))); gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, N * N * sizeof(*d_A_dense), cudaMemcpyHostToDevice)); // --- Descriptor for sparse matrix A cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSafeCall(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE)); // --- Descriptor for sparse matrix C cusparseMatDescr_t descrC; cusparseSafeCall(cusparseCreateMatDescr(&descrC)); cusparseSafeCall(cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrC, CUSPARSE_INDEX_BASE_ONE)); int nnzA = 0; // --- Number of nonzero elements in dense matrix A // --- Number of nonzero elements in dense matrix B int lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row of matrix A int* d_nnzPerVectorA; gpuErrchk(cudaMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA))); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); // --- Host side number of nonzero elements per row of matrix A int* h_nnzPerVectorA = (int*)malloc(N * sizeof(*h_nnzPerVectorA)); gpuErrchk(cudaMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), cudaMemcpyDeviceToHost)); /*printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]); printf("\n");*/ //printf("Number of nonzero elements in dense matrix B = %i\n\n", nnzB); //for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorB[i]); //printf("\n"); // --- Device side sparse matrix double* d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); int* d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices))); int* d_C_RowIndices; gpuErrchk(cudaMalloc(&d_C_RowIndices, (N + 1) * sizeof(*d_C_RowIndices))); int* d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); cusparseSafeCall(cusparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); // --- Host side sparse matrices //double* h_A = (double*)malloc(nnzA * sizeof(*h_A)); //int* h_A_RowIndices = (int*)malloc((N + 1) * sizeof(*h_A_RowIndices)); //int* h_A_ColIndices = (int*)malloc(nnzA * sizeof(*h_A_ColIndices)); //int* h_C_RowIndices = (int*)malloc((N + 1) * sizeof(*h_C_RowIndices)); //gpuErrchk(cudaMemcpy(h_A, d_A, nnzA * sizeof(*h_A), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * 
sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost)); // --- Performing the matrix - matrix multiplication int baseC, nnzC = 0; // nnzTotalDevHostPtr points to host memory int* nnzTotalDevHostPtr = &nnzC; cusparseSafeCall(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(cusparseXcsrgemmNnz(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC, d_C_RowIndices, nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC = *nnzTotalDevHostPtr; else { gpuErrchk(cudaMemcpy(&nnzC, d_C_RowIndices + N, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&baseC, d_C_RowIndices, sizeof(int), cudaMemcpyDeviceToHost)); nnzC -= baseC; } int* d_C_ColIndices; gpuErrchk(cudaMalloc(&d_C_ColIndices, nnzC * sizeof(int))); double* d_C; gpuErrchk(cudaMalloc(&d_C, nnzC * sizeof(double))); double* h_C = (double*)malloc(nnzC * sizeof(*h_C)); int* h_C_ColIndices = (int*)malloc(nnzC * sizeof(*h_C_ColIndices)); cusparseSafeCall(cusparseDcsrgemm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC, d_C, d_C_RowIndices, d_C_ColIndices)); cusparseSafeCall(cusparseDcsr2dense(handle, N, N, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_C_dense, N)); cudaFree(descrC); cudaFree(d_C); cudaFree(d_C_RowIndices); cudaFree(d_C_ColIndices); cudaFree(descrA); cudaFree(d_A); cudaFree(d_A_RowIndices); cudaFree(d_A_ColIndices); //gpuErrchk(cudaMemcpy(h_C, d_C, nnzC * sizeof(*h_C), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_RowIndices, d_C_RowIndices, (N + 1) * sizeof(*h_C_RowIndices), cudaMemcpyDeviceToHost)); //gpuErrchk(cudaMemcpy(h_C_ColIndices, d_C_ColIndices, nnzC * sizeof(*h_C_ColIndices), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), cudaMemcpyDeviceToHost)); cudaFree(d_C_dense); cudaFree(d_A_dense); cudaFree(d_nnzPerVectorA); free(h_nnzPerVectorA); /*cout << "output \n"; print_matrix(h_C_dense, N);*/ cout << "nnzC " << nnzC << "\n"; *nnz_return = nnzC; return h_C_dense; }
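The program above computes graph reachability by repeatedly squaring the adjacency matrix with cuSPARSE until the non-zero count stops changing. The short driver below is not part of the original file: it is a minimal usage sketch of the SMVP_saveMemory() and print_matrix() routines defined above, assuming it sits in the same translation unit; the 3-node path graph and the name demo_reachability are invented here purely for illustration.

// Illustrative only: square a tiny adjacency matrix (with self-loops added, as in main())
// until the number of non-zeros converges, then print the resulting reachability pattern.
int demo_reachability()
{
	const int N = 3;                                      // toy graph: 0 -> 1 -> 2
	double* h_A = (double*)malloc(N * N * sizeof(double));
	for (int i = 0; i < N * N; i++) h_A[i] = 0.0;
	for (int i = 0; i < N; i++) h_A[i * N + i] = 1.0;     // identity, so every vertex reaches itself
	h_A[0 * N + 1] = 1.0;                                 // edge 0 -> 1 (same fill convention as main())
	h_A[1 * N + 2] = 1.0;                                 // edge 1 -> 2

	int nnz = 0, prev_nnz = -1;
	for (int round = 0; round < N - 1 && nnz != prev_nnz; round++) {
		prev_nnz = nnz;
		h_A = SMVP_saveMemory(h_A, &nnz, N);              // result is written back into the same host buffer
	}
	print_matrix(h_A, N);                                 // non-zero entries mark vertex pairs joined by a path
	free(h_A);
	return 0;
}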
88894500a133e43c5b4b41a2a857f7083ae7c415.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "utils/cuda_device.h" #include "utils/math_functions.h" #include "utils/cast.h" namespace dragon { namespace math { /******************** Level-0 ********************/ template <typename T> __global__ void _Set( const int n, const T alpha, T* x) { CUDA_1D_KERNEL_LOOP(idx, n) { x[idx] = alpha; } } template <> void Set<float, CUDAContext>( const int n, const float alpha, float* x, CUDAContext* ctx) { if (alpha == 0.f) { CUDA_CHECK(hipMemsetAsync(x, 0, sizeof(float) * n, ctx->cuda_stream())); } else { _Set<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, x); } } template <> void Set<int, CUDAContext>( const int n, const int alpha, int* x, CUDAContext* ctx) { if (alpha == 0) { CUDA_CHECK(hipMemsetAsync(x, 0, sizeof(int) * n, ctx->cuda_stream())); } else { _Set<int> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, x); } } template <> void RandomUniform<uint32_t, CUDAContext>( const int n, const float low, const float high, uint32_t* x, CUDAContext* ctx) { // note that we ignore the low / high // hiprand could only generates in the range of [0, uint32] auto* rng = ctx->curand_generator(); CURAND_CHECK(hiprandGenerate(rng, x, n)); } template <> void RandomNormal<float, CUDAContext>( const int n, const float mu, const float sigma, float* x, CUDAContext* ctx) { auto* rng = ctx->curand_generator(); CURAND_CHECK(hiprandGenerateNormal(rng, x, n, mu, sigma)); } template <> void RandomBernoulli<float, CUDAContext>( const int n, const float p, unsigned int* x, CUDAContext* ctx) { // hiprand could not generate bernoulli distribution // we recommend implement it within specfic case, e.g. Dropout NOT_IMPLEMENTED; } /******************** Level-1 ********************/ template <typename T> __global__ void _Add( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] + b[idx]; } } template <> void Add<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Add<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Sub( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] - b[idx]; } } template <> void Sub<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Sub<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Mul( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] * b[idx]; } } template <> void Mul<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Mul<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Div( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] / b[idx]; } } template <> void Div<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Div<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Clip( const int n, const T low, const T high, T* x) { CUDA_1D_KERNEL_LOOP(idx, n) { x[idx] = x[idx] > high ? high : x[idx]; x[idx] = x[idx] < low ? 
low : x[idx]; } } template <> void Clip<float, CUDAContext>( const int n, const float low, const float high, float* x, CUDAContext* ctx) { _Clip<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, low, high, x); } template <typename T> __global__ void _Exp( const int n, const T* a, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = exp(a[idx]); } } template <> void Exp<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Exp<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Log( const int n, const T* a, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = log(a[idx]); } } template <> void Log<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Log<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Square( const int n, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = x[idx] * x[idx]; } } template <> void Square<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Square<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Sqrt( const int n, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = sqrt(x[idx]); } } template <> void Sqrt<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Sqrt<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Pow( const int n, const T alpha, const T* a, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = pow(a[idx], alpha); } } template <> void Pow<float, CUDAContext>( int n, const float alpha, const float* x, float* y, CUDAContext* ctx) { _Pow<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, x, y); } template <typename T> __global__ void _Inv( const int n, const float numerator, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = numerator / x[idx]; } } template <> void Inv<float, CUDAContext>( const int n, const float numerator, const float* x, float* y, CUDAContext* ctx) { _Inv<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, numerator, x, y); } /******************** Level-2 ********************/ template <> void Scal<float, CUDAContext>( const int n, const float alpha, float* y, CUDAContext* ctx) { CUBLAS_CHECK(hipblasSscal( ctx->cublas_handle(), n, &alpha, y, 1)); } template <> void Scale<float, CUDAContext>( const int n, const float alpha, const float* x, float* y, CUDAContext* ctx) { CUBLAS_CHECK(hipblasScopy( ctx->cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal( ctx->cublas_handle(), n, &alpha, y, 1)); } template <> void StridedDot<float, CUDAContext>( const int n, const float* a, const int incx, const float* b, const int incy, float* y, CUDAContext* ctx) { CUBLAS_CHECK(hipblasSdot(ctx->cublas_handle(), n, a, incx, b, incy, y)); } template <> void Dot<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { StridedDot<float, CUDAContext>( n, a, 1, b, 1, y, ctx); ctx->FinishDeviceCompution(); } template <> float ASum<float, CUDAContext>( const int n, const float* x) { return hipblasSasum(n, x, 1); } template <typename T> __global__ void _AddScalar( const int n, T alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] += alpha; } } template <> void AddScalar<float, CUDAContext>( const int n, const float alpha, float* y, CUDAContext* ctx) { _AddScalar<float> << < CUDA_BLOCKS(n), 
CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, y); } template <typename T> __global__ void _MulScalar( const int n, T alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] *= alpha; } } template <> void MulScalar<float, CUDAContext>( const int n, const float alpha, float* y, CUDAContext* ctx) { _MulScalar<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, y); } template <> void Axpy<float, CUDAContext>( const int n, float alpha, const float* x, float* y, CUDAContext* ctx) { CUBLAS_CHECK(hipblasSaxpy( ctx->cublas_handle(), n, &alpha, x, 1, y, 1)); } template <> void Axpby<float, CUDAContext>( const int n, float alpha, const float* x, float beta, float* y, CUDAContext* ctx) { Scal<float, CUDAContext>(n, beta, y, ctx); Axpy<float, CUDAContext>(n, alpha, x, y, ctx); } template <> void RandomUniform<float, CUDAContext>( const int n, const float low, const float high, float* x, CUDAContext* ctx) { CURAND_CHECK(hiprandGenerateUniform( ctx->curand_generator(), x, n)); float range = high - low; if (range != 1.f) Scal<float, CUDAContext>(n, range, x, ctx); if (low != 0.f) AddScalar<float, CUDAContext>(n, low, x, ctx); } /******************** Level-3 ********************/ template <> void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* ctx, TensorProto_DataType math_type) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const float _alpha_ = alpha, _beta_ = beta; CUBLAS_CHECK(hipblasSgemm(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &_alpha_, B, ldb, A, lda, &_beta_, C, N)); } template <> void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* ctx, TensorProto_DataType math_type) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; const float _alpha_ = alpha, _beta_ = beta; CUBLAS_CHECK(hipblasSgemv( ctx->cublas_handle(), cuTransA, N, M, &_alpha_, A, N, x, 1, &_beta_, y, 1)); } } // namespace math } // namespace dragon #endif // WITH_CUDA
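The element-wise kernels in the file above all iterate through the framework's CUDA_1D_KERNEL_LOOP macro. The fragment below is a sketch added here for illustration and is not part of the file: it writes that kind of loop out by hand as a plain grid-stride loop for an axpy-style update, so nothing beyond standard CUDA/HIP device code is assumed.

// Sketch: hand-written grid-stride loop, equivalent in spirit to the kernels above.
// Each thread starts at its global index and strides by the total thread count,
// so any launch configuration covers all n elements.
__global__ void axpy_sketch(const int n, const float alpha, const float* x, float* y) {
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
		y[i] += alpha * x[i];
	}
}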
88894500a133e43c5b4b41a2a857f7083ae7c415.cu
#ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "utils/cuda_device.h" #include "utils/math_functions.h" #include "utils/cast.h" namespace dragon { namespace math { /******************** Level-0 ********************/ template <typename T> __global__ void _Set( const int n, const T alpha, T* x) { CUDA_1D_KERNEL_LOOP(idx, n) { x[idx] = alpha; } } template <> void Set<float, CUDAContext>( const int n, const float alpha, float* x, CUDAContext* ctx) { if (alpha == 0.f) { CUDA_CHECK(cudaMemsetAsync(x, 0, sizeof(float) * n, ctx->cuda_stream())); } else { _Set<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, x); } } template <> void Set<int, CUDAContext>( const int n, const int alpha, int* x, CUDAContext* ctx) { if (alpha == 0) { CUDA_CHECK(cudaMemsetAsync(x, 0, sizeof(int) * n, ctx->cuda_stream())); } else { _Set<int> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, x); } } template <> void RandomUniform<uint32_t, CUDAContext>( const int n, const float low, const float high, uint32_t* x, CUDAContext* ctx) { // note that we ignore the low / high // curand could only generates in the range of [0, uint32] auto* rng = ctx->curand_generator(); CURAND_CHECK(curandGenerate(rng, x, n)); } template <> void RandomNormal<float, CUDAContext>( const int n, const float mu, const float sigma, float* x, CUDAContext* ctx) { auto* rng = ctx->curand_generator(); CURAND_CHECK(curandGenerateNormal(rng, x, n, mu, sigma)); } template <> void RandomBernoulli<float, CUDAContext>( const int n, const float p, unsigned int* x, CUDAContext* ctx) { // curand could not generate bernoulli distribution // we recommend implement it within specfic case, e.g. Dropout NOT_IMPLEMENTED; } /******************** Level-1 ********************/ template <typename T> __global__ void _Add( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] + b[idx]; } } template <> void Add<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Add<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Sub( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] - b[idx]; } } template <> void Sub<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Sub<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Mul( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] * b[idx]; } } template <> void Mul<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Mul<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Div( const int n, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = a[idx] / b[idx]; } } template <> void Div<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { _Div<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, a, b, y); } template <typename T> __global__ void _Clip( const int n, const T low, const T high, T* x) { CUDA_1D_KERNEL_LOOP(idx, n) { x[idx] = x[idx] > high ? high : x[idx]; x[idx] = x[idx] < low ? 
low : x[idx]; } } template <> void Clip<float, CUDAContext>( const int n, const float low, const float high, float* x, CUDAContext* ctx) { _Clip<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, low, high, x); } template <typename T> __global__ void _Exp( const int n, const T* a, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = exp(a[idx]); } } template <> void Exp<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Exp<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Log( const int n, const T* a, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = log(a[idx]); } } template <> void Log<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Log<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Square( const int n, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = x[idx] * x[idx]; } } template <> void Square<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Square<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Sqrt( const int n, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = sqrt(x[idx]); } } template <> void Sqrt<float, CUDAContext>( int n, const float* x, float* y, CUDAContext* ctx) { _Sqrt<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, x, y); } template <typename T> __global__ void _Pow( const int n, const T alpha, const T* a, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = pow(a[idx], alpha); } } template <> void Pow<float, CUDAContext>( int n, const float alpha, const float* x, float* y, CUDAContext* ctx) { _Pow<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, x, y); } template <typename T> __global__ void _Inv( const int n, const float numerator, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = numerator / x[idx]; } } template <> void Inv<float, CUDAContext>( const int n, const float numerator, const float* x, float* y, CUDAContext* ctx) { _Inv<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, numerator, x, y); } /******************** Level-2 ********************/ template <> void Scal<float, CUDAContext>( const int n, const float alpha, float* y, CUDAContext* ctx) { CUBLAS_CHECK(cublasSscal_v2( ctx->cublas_handle(), n, &alpha, y, 1)); } template <> void Scale<float, CUDAContext>( const int n, const float alpha, const float* x, float* y, CUDAContext* ctx) { CUBLAS_CHECK(cublasScopy_v2( ctx->cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal_v2( ctx->cublas_handle(), n, &alpha, y, 1)); } template <> void StridedDot<float, CUDAContext>( const int n, const float* a, const int incx, const float* b, const int incy, float* y, CUDAContext* ctx) { CUBLAS_CHECK(cublasSdot_v2(ctx->cublas_handle(), n, a, incx, b, incy, y)); } template <> void Dot<float, CUDAContext>( int n, const float* a, const float* b, float* y, CUDAContext* ctx) { StridedDot<float, CUDAContext>( n, a, 1, b, 1, y, ctx); ctx->FinishDeviceCompution(); } template <> float ASum<float, CUDAContext>( const int n, const float* x) { return cublasSasum(n, x, 1); } template <typename T> __global__ void _AddScalar( const int n, T alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] += alpha; } } template <> void AddScalar<float, CUDAContext>( const int n, const float alpha, float* y, CUDAContext* ctx) { _AddScalar<float> << < 
CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, y); } template <typename T> __global__ void _MulScalar( const int n, T alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] *= alpha; } } template <> void MulScalar<float, CUDAContext>( const int n, const float alpha, float* y, CUDAContext* ctx) { _MulScalar<float> << < CUDA_BLOCKS(n), CUDA_THREADS, 0, ctx->cuda_stream() >> >(n, alpha, y); } template <> void Axpy<float, CUDAContext>( const int n, float alpha, const float* x, float* y, CUDAContext* ctx) { CUBLAS_CHECK(cublasSaxpy_v2( ctx->cublas_handle(), n, &alpha, x, 1, y, 1)); } template <> void Axpby<float, CUDAContext>( const int n, float alpha, const float* x, float beta, float* y, CUDAContext* ctx) { Scal<float, CUDAContext>(n, beta, y, ctx); Axpy<float, CUDAContext>(n, alpha, x, y, ctx); } template <> void RandomUniform<float, CUDAContext>( const int n, const float low, const float high, float* x, CUDAContext* ctx) { CURAND_CHECK(curandGenerateUniform( ctx->curand_generator(), x, n)); float range = high - low; if (range != 1.f) Scal<float, CUDAContext>(n, range, x, ctx); if (low != 0.f) AddScalar<float, CUDAContext>(n, low, x, ctx); } /******************** Level-3 ********************/ template <> void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* ctx, TensorProto_DataType math_type) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const float _alpha_ = alpha, _beta_ = beta; CUBLAS_CHECK(cublasSgemm_v2(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &_alpha_, B, ldb, A, lda, &_beta_, C, N)); } template <> void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* ctx, TensorProto_DataType math_type) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; const float _alpha_ = alpha, _beta_ = beta; CUBLAS_CHECK(cublasSgemv_v2( ctx->cublas_handle(), cuTransA, N, M, &_alpha_, A, N, x, 1, &_beta_, y, 1)); } } // namespace math } // namespace dragon #endif // WITH_CUDA
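The Gemm<float, CUDAContext> specialization above feeds row-major tensors to column-major cuBLAS by swapping the two operands, i.e. computing C^T = op(B)^T * op(A)^T. The helper below is a distilled sketch of that trick for the no-transpose case; rowMajorSgemm is a name invented here and is not part of the file or of cuBLAS.

#include <cublas_v2.h>

// Sketch: row-major C[M x N] = A[M x K] * B[K x N] via column-major cuBLAS,
// by asking for C^T = B^T * A^T (swap the operands and swap M with N).
void rowMajorSgemm(cublasHandle_t handle, int M, int N, int K,
                   const float* A, const float* B, float* C) {
	const float alpha = 1.f, beta = 0.f;
	cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
	            N, M, K,
	            &alpha,
	            B, N,      // first operand: B with leading dimension N
	            A, K,      // second operand: A with leading dimension K
	            &beta,
	            C, N);     // C stored with leading dimension N, i.e. row-major M x N
}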
02fdc1f90cd508d2a1a34da00901569dfc9c52e8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <sstream>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "changeDatatype.cuh"

using namespace std;

__global__ void changeType(float* srcData, float* dstData, int n, int c, int h, int w, int filtersPerThread_x, int filtersPerThread_y)
{
	const int idxCol = threadIdx.y + blockDim.y*blockIdx.y;
	const int idxRow = threadIdx.x + blockDim.x*blockIdx.x;

	int maxBlock = (n * c) / (filtersPerThread_x * filtersPerThread_y);
	int idxBlock = (int)fminf((float)(blockIdx.y * gridDim.x + blockIdx.x), (float)(maxBlock));

	const int idxfilterW = threadIdx.x % w;
	const int idxfilterH = threadIdx.y % h;

	int threadChannelX = threadIdx.x / w;
	int threadChannelY = threadIdx.y / h;

	int idxChannel_a = idxBlock * filtersPerThread_x * filtersPerThread_y + threadChannelY * filtersPerThread_x + threadChannelX;
	int idxChannel = idxChannel_a % c;
	int idxN = (int)fminf((float)(idxChannel_a / c), (float)(n - 1));

	dstData[idxN * (c * w * h) + idxChannel * (w * h) + idxfilterH * w + idxfilterW] =
		srcData[idxfilterH * (n * c * w) + idxfilterW * (c * n) + idxChannel * n + idxN];
}

void changeDataType(float* srcData, float* dstData, int n, int c, int h, int w)
{
	int filtersPerThread_x = 30 / w;
	int filtersPerThread_y = 30 / h;

	int totalBlocks = (c * n) / (filtersPerThread_x * filtersPerThread_y) + 1;
	int numBlock_y = totalBlocks / 255 + 1;

	dim3 numOfBlocks(255, numBlock_y, 1);
	dim3 threadsPerBlock(30, 30, 1);

	changeType <<< numOfBlocks, threadsPerBlock >> > (srcData, dstData, n, c, h, w, filtersPerThread_x, filtersPerThread_y);
}
02fdc1f90cd508d2a1a34da00901569dfc9c52e8.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <device_functions.h> #include <sstream> #include <fstream> #include <iostream> #include <stdlib.h> #include <math.h> #include <cuda.h> #include "changeDatatype.cuh" using namespace std; __global__ void changeType(float* srcData, float* dstData, int n, int c, int h, int w, int filtersPerThread_x, int filtersPerThread_y) { const int idxCol = threadIdx.y + blockDim.y*blockIdx.y; const int idxRow = threadIdx.x + blockDim.x*blockIdx.x; int maxBlock = (n * c) / (filtersPerThread_x * filtersPerThread_y); int idxBlock = (int)fminf((float)(blockIdx.y * gridDim.x + blockIdx.x), (float)(maxBlock)); const int idxfilterW = threadIdx.x % w; const int idxfilterH = threadIdx.y % h; int threadChannelX = threadIdx.x / w; int threadChannelY = threadIdx.y / h; int idxChannel_a =idxBlock * filtersPerThread_x * filtersPerThread_y + threadChannelY *filtersPerThread_x + threadChannelX; int idxChannel = idxChannel_a % c; int idxN = (int)fminf((float)(idxChannel_a / c), (float)(n-1)); dstData[idxN * (c * w* h) + idxChannel * (w*h) + idxfilterH * w + idxfilterW] = srcData[idxfilterH * (n * c * w) + idxfilterW * (c * n) + idxChannel * n + idxN]; } void changeDataType(float* srcData, float* dstData, int n, int c, int h, int w) { int filtersPerThread_x = 30 / w; int filtersPerThread_y = 30 / h; int totalBlocks = (c * n) / (filtersPerThread_x * filtersPerThread_y) + 1; int numBlock_y = totalBlocks / 255 + 1; dim3 numOfBlocks(255, numBlock_y, 1); dim3 threadsPerBlock(30, 30, 1); changeType <<< numOfBlocks, threadsPerBlock >> > (srcData, dstData, n, c, h, w, filtersPerThread_x, filtersPerThread_y); }
3347be81e98ca232d0651ae45eaee5f5fa0e374b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define DATA float
#define BOOL int
#define MAX_ERR 1e-5

//Grid features
#define OPTIMUM_BLOCK_NUM 12
#define BLOCK_SIDE 16
#define OPTIMUM_BLOCK_NUM_FIRST_LAYER 2
#define BLOCK_SIDE_FIRST_LAYER 32

//Network features
#define NEURO_INPUT 784 //#neurons of input layer
#define NEURO_H_0 56 //#neurons of first hidden layer
#define NEURO_H_1 28 //#neurons of second hidden layer
#define NEURO_OUTPUT 10 //#neurons of output layer
#define TOTAL_PATT 60000 //#total patterns
#define NUM_HIDDEN 2 //#hidden layers
#define TOTAL_LAYER 4 //#of layers
#define MATRIX_NUMBER_STRUCT 3 //#matrix to copy to Device(in struct)
#define GLOBAL_H_SIZE TOTAL_PATT * (NEURO_INPUT + NEURO_H_0 + NEURO_H_1 +NEURO_OUTPUT)
#define NOT_TO_COPY TOTAL_PATT * (NEURO_H_0 + NEURO_H_1 +NEURO_OUTPUT)
#define GLOBAL_W_SIZE (NEURO_INPUT*NEURO_H_0) + (NEURO_H_0*NEURO_H_1) + (NEURO_H_1*NEURO_OUTPUT)
#define GLOBAL_BIAS_SIZE NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT

/*Struct Grid Settings*/
typedef struct grid_settings {
    unsigned int grid[3];
    unsigned int block[3];
}grid_settings;

grid_settings gs = { { OPTIMUM_BLOCK_NUM_FIRST_LAYER, OPTIMUM_BLOCK_NUM, OPTIMUM_BLOCK_NUM },{BLOCK_SIDE_FIRST_LAYER,BLOCK_SIDE,BLOCK_SIDE} };

/*Struct One Copy HostToDev*/
typedef struct host_to_dev_mem{
    int matrix_WB_index[MATRIX_NUMBER_STRUCT-1][TOTAL_LAYER-1];//INDEX for padding in Weight & Bias
    int matrix_H2H_index[MATRIX_NUMBER_STRUCT-2][TOTAL_LAYER];//INDEX for padding in H2H
    DATA WeightH2H[GLOBAL_W_SIZE];
    DATA BiasH2H[GLOBAL_BIAS_SIZE];
    DATA H2H[GLOBAL_H_SIZE];
} host_to_dev_mem;

/*UTILITIES*/
static void HandleCuda(hipError_t err, const char *file, int line) {
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

#define HANDLE_CUDA( err ) (HandleCuda( err, __FILE__, __LINE__ ))

void startTimer(hipEvent_t *start, hipEvent_t *stop) {
    HANDLE_CUDA(hipEventCreate(start));
    HANDLE_CUDA(hipEventCreate(stop));
    HANDLE_CUDA(hipEventRecord(*start, 0));
}

void stopAndPrint(hipEvent_t *start, hipEvent_t *stop) {
    HANDLE_CUDA(hipEventRecord(*stop, 0));
    HANDLE_CUDA(hipEventSynchronize(*stop));
    float time = 0.0f;
    HANDLE_CUDA(hipEventElapsedTime(&time, *start, *stop));
    printf("Elapsed Time: %f milliseconds\n", time);
    HANDLE_CUDA(hipEventDestroy(*start));
    HANDLE_CUDA(hipEventDestroy(*stop));
}

/*HOST*/
void feedforward(struct host_to_dev_mem * , struct host_to_dev_mem*, int *, int);
void printMat(DATA *, int, int);
void MMMulHost(DATA *, DATA *, DATA *, DATA *, int, int, int);
BOOL matsAreEquals(DATA *, DATA *, int, int);

/*HOST ALLOCATION AND INITIALIZATION*/
void HOST_init_struct(struct host_to_dev_mem* , int* );

/*---------------------------------------------------------------------KERNEL--------------------------------------------------------------------------*/

/*DEVICE*/
/* h2h is the pointer to the portion of the global h2h to be processed in this phase (at each step the launcher that invokes this kernel advances the h2h pointer proportionally to patt_per_step (and likewise h2h_dest) (see below)).
   Tiled matrix product: h2h (num_pattern x row_w) times w (row_w x col_w), followed by bias addition and sigmoid. */
__global__ void MMMulDevPartial(DATA *h2h, DATA *w, DATA *biases, DATA * h2h_dest, unsigned int row_w, unsigned int col_w, unsigned int num_pattern) {

    const int pos_block_y = blockIdx.y*blockDim.x; //Position of the current block within the grid along y
    if (pos_block_y >= num_pattern) { return; }

    int tx = threadIdx.x, ty = threadIdx.y;
    int block_x = blockIdx.x;
    int block_y = blockIdx.y;
    const int block_dim = blockDim.x; // we assume the blocks are square
    int dest_x = block_x*block_dim + tx;
    int dest_y = block_y*block_dim + ty;
    int w_x = block_x*block_dim; // start block in w
    int h2h_y = block_y*block_dim*row_w; // start block in h2h
    int end_h2h = h2h_y + row_w - 1; // last block position in h2h
    int step_w = block_dim*col_w;
    int step_h2h = block_dim;
    int min;
    DATA partial = 0.0f;
    int block_r_border = 0; // counter indicating which block iteration we are in
    int current_inc;

    for (int wid = w_x, h2h_id = h2h_y; h2h_id <= end_h2h; wid += step_w, h2h_id += step_h2h) {
        block_r_border += block_dim;
        __shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER+1];
        __shared__ DATA shared_h2h[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER+1];
        int t_index_w = wid + tx + ty*col_w;
        int t_index_h2h = h2h_id + tx + ty*row_w;
        //Beware of thread divergence (see CCC p.137)
        shared_h2h[ty][tx] = (t_index_h2h < num_pattern*row_w) ? (h2h[t_index_h2h]) : (0.0f);
        shared_w[ty][tx] = (t_index_w < col_w*row_w) ? (w[t_index_w]) : (0.0f);
        __syncthreads();
        current_inc = row_w - (block_r_border - block_dim);
        min = (current_inc < block_dim) ? (current_inc) : (block_dim);
        for (int k = 0; k < min; k++) {
            partial += shared_h2h[ty][k] * shared_w[k][tx];
        }
        __syncthreads();
    }

    //Beware of thread divergence (see CCC p.137)
    if (dest_x < col_w && dest_y < num_pattern) {
        h2h_dest[dest_y*col_w + dest_x] = 1.0 / (1.0 + (float)exp(-(partial + biases[dest_x]))); //SIGMA
    }
}

/*patt_per_step is the number of patterns (when possible...) to process in each iteration over h2h*/
/*At each step this launcher advances the h2h pointer by num_patt_per_step*NEURO_L_L_1 (and likewise h2h_dest), checks that it is still within the range of h2h, and computes num_pattern (see above) from the remaining patterns*/
void MMMulDev(DATA *h2h, DATA *w, DATA *biases, DATA *h2h_dest, unsigned int row_w, unsigned int col_w , unsigned int patt_per_step, dim3 grid, dim3 block) {

    unsigned int current_patts;
    unsigned int remaining_patts;

    //const int pos_block_y = blockIdx.y*blockDim.x; //Position of the current block within the grid along y
    //We assume the blocks are square (blockDim.x = blockDim.y)
    for (unsigned int x = 0; x < TOTAL_PATT; x += patt_per_step) {
        remaining_patts = TOTAL_PATT - x;
        current_patts = (remaining_patts < patt_per_step) ? (remaining_patts) : (patt_per_step);
        //if (pos_block_y >= current_patts) { return; }
        MMMulDevPartial << <grid, block >> > (h2h + x*row_w, w, biases, h2h_dest + x*col_w, row_w, col_w, current_patts);
    }
}

/*----------------------------------------------------------------------MAIN---------------------------------------------------------------------------*/

int main(void) {

    int *nupl = (int*)malloc(TOTAL_LAYER * sizeof(int));

    /*this part will have to be made dynamic.
    Later it will need to read from files.*/
    nupl[0] = NEURO_INPUT;
    nupl[1] = NEURO_H_0;
    nupl[2] = NEURO_H_1;
    nupl[TOTAL_LAYER - 1] = NEURO_OUTPUT;

    /*host memory allocation and initialization*/
    struct host_to_dev_mem *dev_htdm;
    HANDLE_CUDA(hipMalloc((void **)&dev_htdm, sizeof(struct host_to_dev_mem)));

    /*-----------------------------------FEEDFORWARD-------------------------------------------*/
    //hipEvent_t start, stop;

    struct host_to_dev_mem *htdm = (struct host_to_dev_mem*)malloc(sizeof(struct host_to_dev_mem));

    HOST_init_struct(htdm,nupl);

    //startTimer(&start, &stop);
    feedforward(htdm, dev_htdm, nupl, TOTAL_LAYER);
    //stopAndPrint(&start, &stop);

    /*-----------------------------END---FEEDFORWARD-------------------------------------------*/

    //Host dealloc
    free(nupl);
    free(htdm);
    hipFree(dev_htdm);
    //HOST_dealloc(H2H_MAT, W_MAT, BIAS_MAT);

    //Cuda dealloc
    //CUDA_dealloc(DEV_H2H_MAT, DEV_W_MAT, DEV_BIAS_MAT);

    /*free(DEV_H2H_MAT);
    free(DEV_W_MAT);
    free(DEV_BIAS_MAT); */

    return 0;
}

/*HOST*/

/*FIRST PHASE OF THE ALGORITHM -- THE INPUT IS TRANSMITTED VIA THE NETWORK*/
/* */
void feedforward(struct host_to_dev_mem * str_htdm, struct host_to_dev_mem *dev_htdm, int *nupl, int layers) {

    hipEvent_t start, stop;

    //Grid setting
    dim3 grid, block;
    unsigned int patt_per_step;

    startTimer(&start, &stop);
    HANDLE_CUDA(hipMemcpy(dev_htdm, str_htdm ,sizeof(struct host_to_dev_mem) - NOT_TO_COPY*sizeof(DATA),hipMemcpyHostToDevice));
    stopAndPrint(&start, &stop);

    /*
    DATA **H2H_RES = (DATA**)malloc(TOTAL_LAYER * sizeof(DATA*));
    for (int i = 0; i < TOTAL_LAYER; i++) {
        H2H_RES[i] = (DATA*)malloc(TOTAL_PATT*nupl[i] * sizeof(DATA));
    } */

    for (int l = 0; l < (layers - 1); l++) {

        block.x = gs.block[l];
        block.y = gs.block[l];
        grid.x = (nupl[l+1] + block.x - 1) / block.x;
        grid.y = gs.grid[l] / grid.x;
        patt_per_step = grid.y * block.y;
        //printf("block x = %u block y = %u grid x = %u grid y = %u pattern per step = %u\n", block.x, block.y, grid.x, grid.y, patt_per_step);

        startTimer(&start, &stop);
        MMMulDev( dev_htdm->H2H + str_htdm->matrix_H2H_index[0][l], dev_htdm->WeightH2H + str_htdm->matrix_WB_index[0][l]
            , dev_htdm->BiasH2H + str_htdm->matrix_WB_index[1][l], dev_htdm->H2H + str_htdm->matrix_H2H_index[0][l+1]
            , nupl[l], nupl[l+1], patt_per_step, grid, block);
        stopAndPrint(&start, &stop);

        hipDeviceSynchronize();

        /*
        HANDLE_CUDA(hipMemcpy(str_htdm->H2H+ str_htdm->matrix_H2H_index[0][l+1],dev_htdm->H2H + str_htdm->matrix_H2H_index[0][l+1], (TOTAL_PATT)* nupl[l+1] * sizeof(DATA), hipMemcpyDeviceToHost));
        MMMulHost( str_htdm->H2H + str_htdm->matrix_H2H_index[0][l], str_htdm->WeightH2H + str_htdm->matrix_WB_index[0][l]
            , str_htdm->BiasH2H + str_htdm->matrix_WB_index[1][l], H2H_RES[l + 1], TOTAL_PATT, nupl[l], nupl[l + 1]);
        BOOL b = matsAreEquals(str_htdm->H2H+ str_htdm->matrix_H2H_index[0][l+1], H2H_RES[l + 1], TOTAL_PATT, nupl[l + 1]);
        printf("%d\n", b);*/
    }
}

/*UTILITY FUNCTIONS*/

/*Print a matrix*/
void printMat(DATA *mat, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        printf("ROW %d : {", i);
        for (int j = 0; j < cols; j++) {
            printf("%f - ", mat[i*cols + j]);
        }
        printf("}");
        printf("\n\n");
    }
    printf("\n\n");
}

/*On host multiplication*/
void MMMulHost(DATA *H2H, DATA *W, DATA *BIAS, DATA *H2H_RES, int row_H2H, int col_H2H, int col_W) {

    for (int i = 0; i < row_H2H; i++) {
        for (int j = 0; j < col_W; j++) {
            DATA prod = 0.0;
            for (int k = 0; k < col_H2H; k++) {
                prod += H2H[i*col_H2H + k] * W[k*col_W + j];
            }
            H2H_RES[i*col_W + j] = 1.0 / (1.0 + (float)exp(-(prod + BIAS[j]))); // bias added
        }
    }
}

/*Check device*/
BOOL matsAreEquals(DATA *A, DATA *B, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            // the first column is for adapting the data
            float err = fabs(A[i*cols + j] - B[i*cols + j]);
            //printf("Error in i=%d,j=%d: %f\n", i, j, err);
            if (err >= MAX_ERR) { printf("row: %d, col: %d\n", i, j); return 0; }
        }
    }
    return 1;
}

/*ALLOCATION FUNCTIONS*/

/* init struct on host */
void HOST_init_struct(struct host_to_dev_mem* htdm, int* nupl) {

    int prev_sum[MATRIX_NUMBER_STRUCT];

    htdm->matrix_H2H_index[0][0] = 0;
    htdm->matrix_WB_index[0][0] = 0 ;
    htdm->matrix_WB_index[1][0] = 0;

    //Checks on the mallocs will have to be added
    /*the padding of the matrix at the current layer depends on that of the previous layers*/
    for (unsigned int layer = 1; layer<(TOTAL_LAYER - 1); layer++) {

        prev_sum[0] = htdm->matrix_H2H_index[0][layer-1];
        prev_sum[1] = htdm->matrix_WB_index[0][layer-1];
        prev_sum[2] = htdm->matrix_WB_index[1][layer-1];

        htdm->matrix_H2H_index[0][layer] = nupl[layer-1] * TOTAL_PATT + prev_sum[0];
        htdm->matrix_WB_index[0][layer] = nupl[layer-1] * nupl[layer] + prev_sum[1];
        htdm->matrix_WB_index[1][layer] = nupl[layer] + prev_sum[2];

        for (int i = 0; i < nupl[layer]; i++) {
            for (int j = 0; j < nupl[layer+1]; j++) {
                htdm->WeightH2H[htdm->matrix_WB_index[0][layer] + i*nupl[layer+1] + j] = rand() / (DATA)RAND_MAX;
                htdm->BiasH2H[htdm->matrix_WB_index[1][layer] + j] = rand() / (DATA)RAND_MAX;
            }
        }
    }

    for (int i = 0; i < nupl[0]; i++) {
        for (int j = 0; j < nupl[1]; j++) {
            htdm->WeightH2H[i*nupl[1] + j] = rand() / (DATA)RAND_MAX;
            htdm->BiasH2H[j] = rand() / (DATA)RAND_MAX;
        }
    }

    htdm->matrix_H2H_index[0][TOTAL_LAYER - 1] = nupl[TOTAL_LAYER - 2] * TOTAL_PATT + htdm->matrix_H2H_index[0][TOTAL_LAYER-2];

    for (int i = 0; i < TOTAL_PATT; i++) {
        for (int j = 0; j < NEURO_INPUT; j++) {
            htdm->H2H[i*NEURO_INPUT + j] = rand() / (DATA)RAND_MAX;
        }
    }
}

/*DEALLOCATION FUNCTIONS*/

/*Deallocation of host memory*/

/*Deallocation of device memory (called by host)*/

/*
//To check the correctness of the MM product on the device:

//this part goes before the for loop in feedforward
DATA **H2H_RES = (DATA**)malloc(TOTAL_LAYER * sizeof(DATA*));
for (int i = 0; i < TOTAL_LAYER; i++) {
    H2H_RES[i] = (DATA*)malloc(TOTAL_PATT*nupl[i] * sizeof(DATA));
}

//this part goes inside the for loop in feedforward, after the device sync
HANDLE_CUDA(hipMemcpy(H2H[l+1], dev_H2H[l+1], (TOTAL_PATT)* nupl[l+1] * sizeof(DATA), hipMemcpyDeviceToHost));
MMMulHost(H2H[l], WeightH2H[l], BiasH2H[l], H2H_RES[l + 1], TOTAL_PATT, nupl[l], nupl[l + 1]);
BOOL b = matsAreEquals(H2H_RES[l + 1], H2H[l + 1], TOTAL_PATT, nupl[l + 1]);
printf("%d\n", b);
*/
3347be81e98ca232d0651ae45eaee5f5fa0e374b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define DATA float #define BOOL int #define MAX_ERR 1e-5 //Grid features #define OPTIMUM_BLOCK_NUM 12 #define BLOCK_SIDE 16 #define OPTIMUM_BLOCK_NUM_FIRST_LAYER 2 #define BLOCK_SIDE_FIRST_LAYER 32 //Network features #define NEURO_INPUT 784 //#neurons of input layer #define NEURO_H_0 56 //#neurons of first hidden layer #define NEURO_H_1 28 //#neurons of second hidden layer #define NEURO_OUTPUT 10 //#neurons of output layer #define TOTAL_PATT 60000 //#total patterns #define NUM_HIDDEN 2 //#hidden layers #define TOTAL_LAYER 4 //#of layers #define MATRIX_NUMBER_STRUCT 3 //#matrix to copy to Device(in struct) #define GLOBAL_H_SIZE TOTAL_PATT * (NEURO_INPUT + NEURO_H_0 + NEURO_H_1 +NEURO_OUTPUT) #define NOT_TO_COPY TOTAL_PATT * (NEURO_H_0 + NEURO_H_1 +NEURO_OUTPUT) #define GLOBAL_W_SIZE (NEURO_INPUT*NEURO_H_0) + (NEURO_H_0*NEURO_H_1) + (NEURO_H_1*NEURO_OUTPUT) #define GLOBAL_BIAS_SIZE NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT /*Struct Grid Settings*/ typedef struct grid_settings { unsigned int grid[3]; unsigned int block[3]; }grid_settings; grid_settings gs = { { OPTIMUM_BLOCK_NUM_FIRST_LAYER, OPTIMUM_BLOCK_NUM, OPTIMUM_BLOCK_NUM },{BLOCK_SIDE_FIRST_LAYER,BLOCK_SIDE,BLOCK_SIDE} }; /*Struct One Copy HostToDev*/ typedef struct host_to_dev_mem{ int matrix_WB_index[MATRIX_NUMBER_STRUCT-1][TOTAL_LAYER-1];//INDEX for padding in Weight & Bias int matrix_H2H_index[MATRIX_NUMBER_STRUCT-2][TOTAL_LAYER];//INDEX for padding in H2H DATA WeightH2H[GLOBAL_W_SIZE]; DATA BiasH2H[GLOBAL_BIAS_SIZE]; DATA H2H[GLOBAL_H_SIZE]; } host_to_dev_mem; /*UTILITIES*/ static void HandleCuda(cudaError_t err, const char *file, int line) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line); exit(EXIT_FAILURE); } } #define HANDLE_CUDA( err ) (HandleCuda( err, __FILE__, __LINE__ )) void startTimer(cudaEvent_t *start, cudaEvent_t *stop) { HANDLE_CUDA(cudaEventCreate(start)); HANDLE_CUDA(cudaEventCreate(stop)); HANDLE_CUDA(cudaEventRecord(*start, 0)); } void stopAndPrint(cudaEvent_t *start, cudaEvent_t *stop) { HANDLE_CUDA(cudaEventRecord(*stop, 0)); HANDLE_CUDA(cudaEventSynchronize(*stop)); float time = 0.0f; HANDLE_CUDA(cudaEventElapsedTime(&time, *start, *stop)); printf("Elapsed Time: %f milliseconds\n", time); HANDLE_CUDA(cudaEventDestroy(*start)); HANDLE_CUDA(cudaEventDestroy(*stop)); } /*HOST*/ void feedforward(struct host_to_dev_mem * , struct host_to_dev_mem*, int *, int); void printMat(DATA *, int, int); void MMMulHost(DATA *, DATA *, DATA *, DATA *, int, int, int); BOOL matsAreEquals(DATA *, DATA *, int, int); /*HOST ALLOCATION AND INITIALIZATION*/ void HOST_init_struct(struct host_to_dev_mem* , int* ); /*---------------------------------------------------------------------KERNEL--------------------------------------------------------------------------*/ /*DEVICE*/ /* h2h č il puntatore alla porzione dell'h2h globale da considerare in questa fase (ad ogni passo il kernel che invoca questo device incrementa il puntatore h2h in modo proporzionale al patt_per_step (e similmente h2h_dest) (vedi sotto))*/ __global__ void MMMulDevPartial(DATA *h2h, DATA *w, DATA *biases, DATA * h2h_dest, unsigned int row_w, unsigned int col_w, unsigned int num_pattern) { const int pos_block_y = blockIdx.y*blockDim.x; //Posizione del blocco corrente rispetto alla griglia lungo le y if (pos_block_y >= num_pattern) { return; } int tx = threadIdx.x, ty = threadIdx.y; int 
block_x = blockIdx.x; int block_y = blockIdx.y; const int block_dim = blockDim.x; // assumiamo che i blocchi siano quadrati int dest_x = block_x*block_dim + tx; int dest_y = block_y*block_dim + ty; int w_x = block_x*block_dim; // start block in w int h2h_y = block_y*block_dim*row_w; // start block in h2h int end_h2h = h2h_y + row_w - 1; // last block position in h2h int step_w = block_dim*col_w; int step_h2h = block_dim; int min; DATA partial = 0.0f; int block_r_border = 0; // contatore che indica in che iterazione dei blocchi ci troviamo int current_inc; for (int wid = w_x, h2h_id = h2h_y; h2h_id <= end_h2h; wid += step_w, h2h_id += step_h2h) { block_r_border += block_dim; __shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER+1]; __shared__ DATA shared_h2h[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER+1]; int t_index_w = wid + tx + ty*col_w; int t_index_h2h = h2h_id + tx + ty*row_w; //Attenzione alla divergenza dei threads (vedi CCC pag.137) shared_h2h[ty][tx] = (t_index_h2h < num_pattern*row_w) ? (h2h[t_index_h2h]) : (0.0f); shared_w[ty][tx] = (t_index_w < col_w*row_w) ? (w[t_index_w]) : (0.0f); __syncthreads(); current_inc = row_w - (block_r_border - block_dim); min = (current_inc < block_dim) ? (current_inc) : (block_dim); for (int k = 0; k < min; k++) { partial += shared_h2h[ty][k] * shared_w[k][tx]; } __syncthreads(); } //Attenzione alla divergenza dei threads (vedi CCC pag.137) if (dest_x < col_w && dest_y < num_pattern) { h2h_dest[dest_y*col_w + dest_x] = 1.0 / (1.0 + (float)exp(-(partial + biases[dest_x]))); //SIGMA } } /*patt_per_step č il numero di pattern (quando possibile...) da considerare in ciascuna iterazione su h2h*/ /*Questo kernel ad ogni passo incrementa il puntatore ad h2h di num_patt_per_step*NEURO_L_L_1 (e similmente h2h_dest), controlla che sia ancora nel range di h2h, e calcola num_pattern (vedi sopra) in funzione dei pattern mancanti*/ void MMMulDev(DATA *h2h, DATA *w, DATA *biases, DATA *h2h_dest, unsigned int row_w, unsigned int col_w , unsigned int patt_per_step, dim3 grid, dim3 block) { unsigned int current_patts; unsigned int remaining_patts; //const int pos_block_y = blockIdx.y*blockDim.x; //Posizione del blocco corrente rispetto alla griglia lungo le y //Assumiamo che i blocchi siano quadrati (blockDim.x = blockDim.y) for (unsigned int x = 0; x < TOTAL_PATT; x += patt_per_step) { remaining_patts = TOTAL_PATT - x; current_patts = (remaining_patts < patt_per_step) ? (remaining_patts) : (patt_per_step); //if (pos_block_y >= current_patts) { return; } MMMulDevPartial << <grid, block >> > (h2h + x*row_w, w, biases, h2h_dest + x*col_w, row_w, col_w, current_patts); } } /*----------------------------------------------------------------------MAIN---------------------------------------------------------------------------*/ int main(void) { int *nupl = (int*)malloc(TOTAL_LAYER * sizeof(int)); /*questa parte bisognerą renderla dinamica. 
In seguito bisognerą accedere ai files.*/ nupl[0] = NEURO_INPUT; nupl[1] = NEURO_H_0; nupl[2] = NEURO_H_1; nupl[TOTAL_LAYER - 1] = NEURO_OUTPUT; /*host memory allocation and initialization*/ struct host_to_dev_mem *dev_htdm; HANDLE_CUDA(cudaMalloc((void **)&dev_htdm, sizeof(struct host_to_dev_mem))); /*-----------------------------------FEEDFORWARD-------------------------------------------*/ //cudaEvent_t start, stop; struct host_to_dev_mem *htdm = (struct host_to_dev_mem*)malloc(sizeof(struct host_to_dev_mem)); HOST_init_struct(htdm,nupl); //startTimer(&start, &stop); feedforward(htdm, dev_htdm, nupl, TOTAL_LAYER); //stopAndPrint(&start, &stop); /*-----------------------------END---FEEDFORWARD-------------------------------------------*/ //Host dealloc free(nupl); free(htdm); cudaFree(dev_htdm); //HOST_dealloc(H2H_MAT, W_MAT, BIAS_MAT); //Cuda dealloc //CUDA_dealloc(DEV_H2H_MAT, DEV_W_MAT, DEV_BIAS_MAT); /*free(DEV_H2H_MAT); free(DEV_W_MAT); free(DEV_BIAS_MAT); */ return 0; } /*HOST*/ /*FIRT PHASE OF THE ALGORITHM -- THE INPUT IS TRANSMITTED VIA THE NETWORK*/ /* */ void feedforward(struct host_to_dev_mem * str_htdm, struct host_to_dev_mem *dev_htdm, int *nupl, int layers) { cudaEvent_t start, stop; //Grid setting dim3 grid, block; unsigned int patt_per_step; startTimer(&start, &stop); HANDLE_CUDA(cudaMemcpy(dev_htdm, str_htdm ,sizeof(struct host_to_dev_mem) - NOT_TO_COPY*sizeof(DATA),cudaMemcpyHostToDevice)); stopAndPrint(&start, &stop); /* DATA **H2H_RES = (DATA**)malloc(TOTAL_LAYER * sizeof(DATA*)); for (int i = 0; i < TOTAL_LAYER; i++) { H2H_RES[i] = (DATA*)malloc(TOTAL_PATT*nupl[i] * sizeof(DATA)); } */ for (int l = 0; l < (layers - 1); l++) { block.x = gs.block[l]; block.y = gs.block[l]; grid.x = (nupl[l+1] + block.x - 1) / block.x; grid.y = gs.grid[l] / grid.x; patt_per_step = grid.y * block.y; //printf("block x = %u block y = %u grid x = %u grid y = %u pattern per step = %u\n", block.x, block.y, grid.x, grid.y, patt_per_step); startTimer(&start, &stop); MMMulDev( dev_htdm->H2H + str_htdm->matrix_H2H_index[0][l], dev_htdm->WeightH2H + str_htdm->matrix_WB_index[0][l] , dev_htdm->BiasH2H + str_htdm->matrix_WB_index[1][l], dev_htdm->H2H + str_htdm->matrix_H2H_index[0][l+1] , nupl[l], nupl[l+1], patt_per_step, grid, block); stopAndPrint(&start, &stop); cudaDeviceSynchronize(); /* HANDLE_CUDA(cudaMemcpy(str_htdm->H2H+ str_htdm->matrix_H2H_index[0][l+1],dev_htdm->H2H + str_htdm->matrix_H2H_index[0][l+1], (TOTAL_PATT)* nupl[l+1] * sizeof(DATA), cudaMemcpyDeviceToHost)); MMMulHost( str_htdm->H2H + str_htdm->matrix_H2H_index[0][l], str_htdm->WeightH2H + str_htdm->matrix_WB_index[0][l] , str_htdm->BiasH2H + str_htdm->matrix_WB_index[1][l], H2H_RES[l + 1], TOTAL_PATT, nupl[l], nupl[l + 1]); BOOL b = matsAreEquals(str_htdm->H2H+ str_htdm->matrix_H2H_index[0][l+1], H2H_RES[l + 1], TOTAL_PATT, nupl[l + 1]); printf("%d\n", b);*/ } } /*UTILITY FUNCTIONS*/ /*Print a matrix*/ void printMat(DATA *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { printf("ROW %d : {", i); for (int j = 0; j < cols; j++) { printf("%f - ", mat[i*cols + j]); } printf("}"); printf("\n\n"); } printf("\n\n"); } /*On host multiplication*/ void MMMulHost(DATA *H2H, DATA *W, DATA *BIAS, DATA *H2H_RES, int row_H2H, int col_H2H, int col_W) { for (int i = 0; i < row_H2H; i++) { for (int j = 0; j < col_W; j++) { DATA prod = 0.0; for (int k = 0; k < col_H2H; k++) { prod += H2H[i*col_H2H + k] * W[k*col_W + j]; } H2H_RES[i*col_W + j] = 1.0 / (1.0 + (float)exp(-(prod + BIAS[j]))); // bias added } } } /*Check device*/ BOOL 
matsAreEquals(DATA *A, DATA *B, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { // the first column is for adapting the data float err = fabs(A[i*cols + j] - B[i*cols + j]); //printf("Error in i=%d,j=%d: %f\n", i, j, err); if (err >= MAX_ERR) { printf("row: %d, col: %d\n", i, j); return 0; } } } return 1; } /*ALLOCATION FUNCTIONS*/ /* init struct on host */ void HOST_init_struct(struct host_to_dev_mem* htdm, int* nupl) { int prev_sum[MATRIX_NUMBER_STRUCT]; htdm->matrix_H2H_index[0][0] = 0; htdm->matrix_WB_index[0][0] = 0 ; htdm->matrix_WB_index[1][0] = 0; //Bisognerą inserire i controlli sulle malloc /*il padding della matrice al layer corrente dipende da quello dei layer precedenti*/ for (unsigned int layer = 1; layer<(TOTAL_LAYER - 1); layer++) { prev_sum[0] = htdm->matrix_H2H_index[0][layer-1]; prev_sum[1] = htdm->matrix_WB_index[0][layer-1]; prev_sum[2] = htdm->matrix_WB_index[1][layer-1]; htdm->matrix_H2H_index[0][layer] = nupl[layer-1] * TOTAL_PATT + prev_sum[0]; htdm->matrix_WB_index[0][layer] = nupl[layer-1] * nupl[layer] + prev_sum[1]; htdm->matrix_WB_index[1][layer] = nupl[layer] + prev_sum[2]; for (int i = 0; i < nupl[layer]; i++) { for (int j = 0; j < nupl[layer+1]; j++) { htdm->WeightH2H[htdm->matrix_WB_index[0][layer] + i*nupl[layer+1] + j] = rand() / (DATA)RAND_MAX; htdm->BiasH2H[htdm->matrix_WB_index[1][layer] + j] = rand() / (DATA)RAND_MAX; } } } for (int i = 0; i < nupl[0]; i++) { for (int j = 0; j < nupl[1]; j++) { htdm->WeightH2H[i*nupl[1] + j] = rand() / (DATA)RAND_MAX; htdm->BiasH2H[j] = rand() / (DATA)RAND_MAX; } } htdm->matrix_H2H_index[0][TOTAL_LAYER - 1] = nupl[TOTAL_LAYER - 2] * TOTAL_PATT + htdm->matrix_H2H_index[0][TOTAL_LAYER-2]; for (int i = 0; i < TOTAL_PATT; i++) { for (int j = 0; j < NEURO_INPUT; j++) { htdm->H2H[i*NEURO_INPUT + j] = rand() / (DATA)RAND_MAX; } } } /*DEALLOCATION FUNCTIONS*/ /*Deallocation of host memory*/ /*Deallocation of device memory (called by host)*/ /* //Per fare il check della correttezza del prodotto MM sul device: //questa parte prima del for del feedforward DATA **H2H_RES = (DATA**)malloc(TOTAL_LAYER * sizeof(DATA*)); for (int i = 0; i < TOTAL_LAYER; i++) { H2H_RES[i] = (DATA*)malloc(TOTAL_PATT*nupl[i] * sizeof(DATA)); } //questa parte dentro il for del feedforward dopo la syncdevice HANDLE_CUDA(cudaMemcpy(H2H[l+1], dev_H2H[l+1], (TOTAL_PATT)* nupl[l+1] * sizeof(DATA), cudaMemcpyDeviceToHost)); MMMulHost(H2H[l], WeightH2H[l], BiasH2H[l], H2H_RES[l + 1], TOTAL_PATT, nupl[l], nupl[l + 1]); BOOL b = matsAreEquals(H2H_RES[l + 1], H2H[l + 1], TOTAL_PATT, nupl[l + 1]); printf("%d\n", b); */
1172e3dfbd3e01a5dfe6d1776b562e1bdd8792b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sum4(float *A, float *B, float *C, const int N) { int j; int i = blockIdx.x * blockDim.x + threadIdx.x; #pragma unroll for (j=0; j < 4; j++) if (i < N) { C[i] = A[i] + B[i]; i += blockDim.x * gridDim.x; } }
1172e3dfbd3e01a5dfe6d1776b562e1bdd8792b6.cu
#include "includes.h" __global__ void sum4(float *A, float *B, float *C, const int N) { int j; int i = blockIdx.x * blockDim.x + threadIdx.x; #pragma unroll for (j=0; j < 4; j++) if (i < N) { C[i] = A[i] + B[i]; i += blockDim.x * gridDim.x; } }
5122ae746e37ccabcac9d2a48e98f06512c50c26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <rocblas.h> #include "cuda_utils.h" __device__ int get_system_bid(){ return blockIdx.z*gridDim.y + blockIdx.y; } __global__ void cudaRoutineFlat(int offset, float * d_arr){ for (int thread_index = 0; thread_index < blockDim.x; thread_index++){ if (threadIdx.x == thread_index){ printf("%.6f\t", d_arr[offset+threadIdx.x]); } __syncthreads(); } if (threadIdx.x == 0){ printf("\n"); } } __global__ void cudaRoutineFlatInt(int Neqn_p_sys, int * d_arr){ printf("int: %d - %d\n",threadIdx.x,d_arr[threadIdx.x]); } __global__ void cudaRoutine(int Neqn_p_sys, float ** d_arr,int index){ printf("%d - %.2f hello\n",threadIdx.x,d_arr[index][threadIdx.x]); } __global__ void printfCUDA(float * pointer){ printf("%f value of cuda pointer \n",*pointer); } __global__ void printFloatArrayCUDA(float * pointer, int Narr){ // safety in case it's called with a bunch of threads lol if (threadIdx.x == 0 && blockIdx.x == 0){ for (int i = 0; i<Narr; i++){ printf("%.2e \t",pointer[i]); } printf("\n"); } } __global__ void checkCublasINFO( int * INFO, int * bool_flag, int Nsystems){ // replace the values of v1 with the error int bid = get_system_bid(); if (bid < Nsystems){ if (INFO[bid]){ *bool_flag = 1; } } } void checkCublasErrorState(int * INFO,int * d_INFO_bool,int INFO_bool,int Nsystems, dim3 ode_gridDim){ hipLaunchKernelGGL(( checkCublasINFO), dim3(ode_gridDim),dim3(1), 0, 0, INFO, d_INFO_bool,Nsystems); hipMemcpy(&INFO_bool,&d_INFO_bool,sizeof(int),hipMemcpyDeviceToHost); printf("INFO: %d \n",INFO_bool); INFO_bool = 0; hipMemcpy(d_INFO_bool,&INFO_bool,sizeof(int),hipMemcpyHostToDevice); } const char *_cudaGetErrorEnum(hipblasStatus_t error) { switch (error) { case HIPBLAS_STATUS_SUCCESS: printf("HIPBLAS_STATUS_SUCCESS\n"); return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: printf("HIPBLAS_STATUS_NOT_INITIALIZED\n"); return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: printf("HIPBLAS_STATUS_ALLOC_FAILED\n"); return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: printf("HIPBLAS_STATUS_INVALID_VALUE\n"); return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: printf("HIPBLAS_STATUS_ARCH_MISMATCH\n"); return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: printf("HIPBLAS_STATUS_MAPPING_ERROR\n"); return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: printf("HIPBLAS_STATUS_EXECUTION_FAILED\n"); return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: printf("HIPBLAS_STATUS_INTERNAL_ERROR\n"); return "HIPBLAS_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } __device__ void cudaBreakpoint(){ ; }
5122ae746e37ccabcac9d2a48e98f06512c50c26.cu
#include <stdio.h> #include <cublas_v2.h> #include "cuda_utils.h" __device__ int get_system_bid(){ return blockIdx.z*gridDim.y + blockIdx.y; } __global__ void cudaRoutineFlat(int offset, float * d_arr){ for (int thread_index = 0; thread_index < blockDim.x; thread_index++){ if (threadIdx.x == thread_index){ printf("%.6f\t", d_arr[offset+threadIdx.x]); } __syncthreads(); } if (threadIdx.x == 0){ printf("\n"); } } __global__ void cudaRoutineFlatInt(int Neqn_p_sys, int * d_arr){ printf("int: %d - %d\n",threadIdx.x,d_arr[threadIdx.x]); } __global__ void cudaRoutine(int Neqn_p_sys, float ** d_arr,int index){ printf("%d - %.2f hello\n",threadIdx.x,d_arr[index][threadIdx.x]); } __global__ void printfCUDA(float * pointer){ printf("%f value of cuda pointer \n",*pointer); } __global__ void printFloatArrayCUDA(float * pointer, int Narr){ // safety in case it's called with a bunch of threads lol if (threadIdx.x == 0 && blockIdx.x == 0){ for (int i = 0; i<Narr; i++){ printf("%.2e \t",pointer[i]); } printf("\n"); } } __global__ void checkCublasINFO( int * INFO, int * bool_flag, int Nsystems){ // replace the values of v1 with the error int bid = get_system_bid(); if (bid < Nsystems){ if (INFO[bid]){ *bool_flag = 1; } } } void checkCublasErrorState(int * INFO,int * d_INFO_bool,int INFO_bool,int Nsystems, dim3 ode_gridDim){ checkCublasINFO<<<ode_gridDim,1>>>(INFO, d_INFO_bool,Nsystems); cudaMemcpy(&INFO_bool,&d_INFO_bool,sizeof(int),cudaMemcpyDeviceToHost); printf("INFO: %d \n",INFO_bool); INFO_bool = 0; cudaMemcpy(d_INFO_bool,&INFO_bool,sizeof(int),cudaMemcpyHostToDevice); } const char *_cudaGetErrorEnum(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: printf("CUBLAS_STATUS_SUCCESS\n"); return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: printf("CUBLAS_STATUS_NOT_INITIALIZED\n"); return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: printf("CUBLAS_STATUS_ALLOC_FAILED\n"); return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: printf("CUBLAS_STATUS_INVALID_VALUE\n"); return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: printf("CUBLAS_STATUS_ARCH_MISMATCH\n"); return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: printf("CUBLAS_STATUS_MAPPING_ERROR\n"); return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: printf("CUBLAS_STATUS_EXECUTION_FAILED\n"); return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: printf("CUBLAS_STATUS_INTERNAL_ERROR\n"); return "CUBLAS_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } __device__ void cudaBreakpoint(){ ; }
068a878639f3ffd90653d827a7ab1552447883a6.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/OpMathType.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <limits> namespace at::native { #if AT_USE_JITERATOR() CONSTEXPR_EXCEPT_WIN_CUDA char cosh_name[] = "cosh_impl"; #endif void cosh_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { #if AT_USE_JITERATOR() static const auto cosh_string = jiterator_stringify( template <typename T> T cosh_impl(T a) { return std::cosh(a); }); AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "cosh_name", [&]() { jitted_gpu_kernel< /*name=*/cosh_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, cosh_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "cosh_name", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return ::cosh(static_cast<opmath_t>(a)); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, common_dtype, "cosh_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cosh(a); }); }); } } REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda); } // namespace at::native
068a878639f3ffd90653d827a7ab1552447883a6.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/OpMathType.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <limits> namespace at::native { #if AT_USE_JITERATOR() CONSTEXPR_EXCEPT_WIN_CUDA char cosh_name[] = "cosh_impl"; #endif void cosh_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { #if AT_USE_JITERATOR() static const auto cosh_string = jiterator_stringify( template <typename T> T cosh_impl(T a) { return std::cosh(a); }); AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "cosh_name", [&]() { jitted_gpu_kernel< /*name=*/cosh_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, cosh_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "cosh_name", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return ::cosh(static_cast<opmath_t>(a)); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, common_dtype, "cosh_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cosh(a); }); }); } } REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda); } // namespace at::native
c73bc00dcaa6ef2b23514b67790bf11347ff9c76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************************* * contractions.cu * * Sat Jul 2 11:19:43 CEST 2011 * * PURPOSE: * TODO: * DONE: * CHANGES: *********************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #ifdef MPI # include <mpi.h> # include <fftw_mpi.h> #else # include <fftw.h> #endif #include <getopt.h> #define MAIN_PROGRAM extern "C" { #include "cvc_complex.h" #include "cvc_linalg.h" #include "global.h" #include "cvc_geometry.h" #include "cvc_utils.h" #include "contractions_io.h" #include "read_input_parser.h" #include "contractions.h" } __constant__ int devT, devL; __constant__ float devMu, devMq; __constant__ float dev_cvc_coeff[2304]; /************************************************************* * the kernel for contract cvc *************************************************************/ __global__ void cvc_kernel (float*cvc_out, float*ct_out, unsigned int N) { int j0, j1, j2, j3, i0, i1, i2, i3; unsigned int L1, L2, L3, V4, imu, inu, icount, rest; // unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int isigma_mu, isigma_nu, ilambda, ikappa; float2 sp[6], sq[6]; float p[4], phalf[4], sinp[4], cosp[4], sinphalf[4], sinq[4], sinqhalf[4]; float q[4], qhalf[4], k[4]; // float khalf[4]; float2 phase[2][2]; float ftmp; float2 cvc_tmp[16], f2tmp, f2tmp2, counter_term[4]; float fTinv, fLinv; float aMp, aK2p, denomp; float aMq, aK2q, denomq; // get external momentum k from idx L1 = devL; L2 = L1 * L1; L3 = L2 * L1; V4 = L3 * devT; if(idx < N) { // initialize counter_term[0].x = 0.; counter_term[0].y = 0.; counter_term[1].x = 0.; counter_term[1].y = 0.; counter_term[2].x = 0.; counter_term[2].y = 0.; counter_term[3].x = 0.; counter_term[3].y = 0.; ftmp = -3.; cvc_tmp[ 0].x = ftmp; cvc_tmp[ 0].y = ftmp; cvc_tmp[ 1].x = ftmp; cvc_tmp[ 1].y = ftmp; cvc_tmp[ 2].x = ftmp; cvc_tmp[ 2].y = ftmp; cvc_tmp[ 3].x = ftmp; cvc_tmp[ 3].y = ftmp; cvc_tmp[ 4].x = ftmp; cvc_tmp[ 4].y = ftmp; cvc_tmp[ 5].x = ftmp; cvc_tmp[ 5].y = ftmp; cvc_tmp[ 6].x = ftmp; cvc_tmp[ 6].y = ftmp; cvc_tmp[ 7].x = ftmp; cvc_tmp[ 7].y = ftmp; cvc_tmp[ 8].x = ftmp; cvc_tmp[ 8].y = ftmp; cvc_tmp[ 9].x = ftmp; cvc_tmp[ 9].y = ftmp; cvc_tmp[10].x = ftmp; cvc_tmp[10].y = ftmp; cvc_tmp[11].x = ftmp; cvc_tmp[11].y = ftmp; cvc_tmp[12].x = ftmp; cvc_tmp[12].y = ftmp; cvc_tmp[13].x = ftmp; cvc_tmp[13].y = ftmp; cvc_tmp[14].x = ftmp; cvc_tmp[14].y = ftmp; cvc_tmp[15].x = ftmp; cvc_tmp[15].y = ftmp; ftmp = -(float)idx; cvc_out[_GWI( 0,idx,V4) ] = ftmp; cvc_out[_GWI( 0,idx,V4)+1] = ftmp; cvc_out[_GWI( 1,idx,V4) ] = ftmp; cvc_out[_GWI( 1,idx,V4)+1] = ftmp; cvc_out[_GWI( 2,idx,V4) ] = ftmp; cvc_out[_GWI( 2,idx,V4)+1] = ftmp; cvc_out[_GWI( 3,idx,V4) ] = ftmp; cvc_out[_GWI( 3,idx,V4)+1] = ftmp; cvc_out[_GWI( 4,idx,V4) ] = ftmp; cvc_out[_GWI( 4,idx,V4)+1] = ftmp; cvc_out[_GWI( 5,idx,V4) ] = ftmp; cvc_out[_GWI( 5,idx,V4)+1] = ftmp; cvc_out[_GWI( 6,idx,V4) ] = ftmp; cvc_out[_GWI( 6,idx,V4)+1] = ftmp; cvc_out[_GWI( 7,idx,V4) ] = ftmp; cvc_out[_GWI( 7,idx,V4)+1] = ftmp; cvc_out[_GWI( 8,idx,V4) ] = ftmp; cvc_out[_GWI( 8,idx,V4)+1] = ftmp; cvc_out[_GWI( 9,idx,V4) ] = ftmp; cvc_out[_GWI( 9,idx,V4)+1] = ftmp; cvc_out[_GWI(10,idx,V4) ] = ftmp; cvc_out[_GWI(10,idx,V4)+1] = ftmp; cvc_out[_GWI(11,idx,V4) ] = ftmp; cvc_out[_GWI(11,idx,V4)+1] = ftmp; cvc_out[_GWI(12,idx,V4) ] = ftmp; 
cvc_out[_GWI(12,idx,V4)+1] = ftmp; cvc_out[_GWI(13,idx,V4) ] = ftmp; cvc_out[_GWI(13,idx,V4)+1] = ftmp; cvc_out[_GWI(14,idx,V4) ] = ftmp; cvc_out[_GWI(14,idx,V4)+1] = ftmp; cvc_out[_GWI(15,idx,V4) ] = ftmp; cvc_out[_GWI(15,idx,V4)+1] = ftmp; j0 = idx / L3; icount = idx - L3*j0; j1 = icount / L2; icount = icount - L2*j1; j2 = icount / L1; j3 = icount - j2*L1; fTinv = 2. * _PI / (float)( devT ); fLinv = 2. * _PI / (float)( devL ); k[0] = (float)(j0) * fTinv; k[1] = (float)(j1) * fLinv; k[2] = (float)(j2) * fLinv; k[3] = (float)(j3) * fLinv; /* khalf[0] = 0.5 * k[0]; khalf[1] = 0.5 * k[1]; khalf[2] = 0.5 * k[2]; khalf[3] = 0.5 * k[3]; */ if(idx==102) { counter_term[0].x = fTinv; counter_term[0].y = fLinv; counter_term[1].x = k[0]; counter_term[1].y = k[1]; counter_term[2].x = k[2]; counter_term[2].y = k[3]; counter_term[3].x = (float)N; counter_term[3].y = (float)V4; } // loop on internal momentum p (summation) i0=0; i1=0; i2=0; i3=0; for(icount=0; icount<V4; icount++) { p[0] = ((float)(i0) + 0.5) * fTinv; phalf[0] = p[0] * 0.5; q[0] = ( ( (float)(i0) + (float)(j0) ) + 0.5 ) * fTinv; qhalf[0] = q[0] * 0.5; sinp[0] = sin( p[0] ); cosp[0] = cos( p[0] ); sinphalf[0] = sin( phalf[0] ); sinq[0] = sin( q[0] ); sinqhalf[0] = sin( qhalf[0] ); p[1] = (float)(i1) * fLinv; phalf[1] = p[1] * 0.5; q[1] = ( (float)(i1) + (float)(j1) ) * fLinv; qhalf[1] = q[1] * 0.5; sinp[1] = sin( p[1] ); cosp[1] = cos( p[1] ); sinphalf[1] = sin( phalf[1] ); sinq[1] = sin( q[1] ); sinqhalf[1] = sin( qhalf[1] ); p[2] = (float)(i2) * fLinv; phalf[2] = p[2] * 0.5; q[2] = ( (float)(i2) + (float)(j2) ) * fLinv; qhalf[2] = q[2] * 0.5; sinp[2] = sin( p[2] ); cosp[2] = cos( p[2] ); sinphalf[2] = sin( phalf[2] ); sinq[2] = sin( q[2] ); sinqhalf[2] = sin( qhalf[2] ); p[3] = (float)(i3) * fLinv; phalf[3] = p[3] * 0.5; q[3] = ( (float)(i3) + (float)(j3) ) * fLinv; qhalf[3] = q[3] * 0.5; sinp[3] = sin( p[3] ); cosp[3] = cos( p[3] ); sinphalf[3] = sin( phalf[3] ); sinq[3] = sin( q[3] ); sinqhalf[3] = sin( qhalf[3] ); aMp = devMq + 2. * (_SQR(sinphalf[0]) + _SQR(sinphalf[1]) + _SQR(sinphalf[2]) + _SQR(sinphalf[3])); aMq = devMq + 2. * (_SQR(sinqhalf[0]) + _SQR(sinqhalf[1]) + _SQR(sinqhalf[2]) + _SQR(sinqhalf[3])); aK2p = _SQR(sinp[0]) + _SQR(sinp[1]) + _SQR(sinp[2]) + _SQR(sinp[3]); aK2q = _SQR(sinq[0]) + _SQR(sinq[1]) + _SQR(sinq[2]) + _SQR(sinq[3]); denomp = 1. / ( aK2p + aMp*aMp + devMu*devMu ); denomq = 1. / ( aK2q + aMq*aMq + devMu*devMu ); sp[0].y = -sinp[0] * denomp; sp[1].y = -sinp[1] * denomp; sp[2].y = -sinp[2] * denomp; sp[3].y = -sinp[3] * denomp; sp[4].x = aMp * denomp; sp[5].y = -devMu * denomp; sq[0].y = -sinq[0] * denomq; sq[1].y = -sinq[1] * denomq; sq[2].y = -sinq[2] * denomq; sq[3].y = -sinq[3] * denomq; sq[4].x = aMq * denomq; sq[5].y = -devMu * denomq; _dev_set_phase(phase,p,k,0,0); cvc_tmp[0]. 
x += 1.; cvc_tmp[0].y += -2.; /* _cvc_accum( cvc_tmp[ 0], 0, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 0, 1 ); _cvc_accum( cvc_tmp[ 1], 0, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 0, 2 ); _cvc_accum( cvc_tmp[ 2], 0, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 0, 3 ); _cvc_accum( cvc_tmp[ 3], 0, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 1, 0 ); _cvc_accum( cvc_tmp[ 4], 1, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 1, 1 ); _cvc_accum( cvc_tmp[ 5], 1, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 1, 2 ); _cvc_accum( cvc_tmp[ 6], 1, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 1, 3 ); _cvc_accum( cvc_tmp[ 7], 1, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 2, 0 ); _cvc_accum( cvc_tmp[ 8], 2, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 2, 1 ); _cvc_accum( cvc_tmp[ 9], 2, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 2, 2 ); _cvc_accum( cvc_tmp[10], 2, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 2, 3 ); _cvc_accum( cvc_tmp[11], 2, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 3, 0 ); _cvc_accum( cvc_tmp[12], 3, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 3, 1 ); _cvc_accum( cvc_tmp[13], 3, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 3, 2 ); _cvc_accum( cvc_tmp[14], 3, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 3, 3 ); _cvc_accum( cvc_tmp[15], 3, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* if( idx == 0 ){ counter_term[0].x += sinp[0] * sp[0].y + sp[4].x * cosp[0]; counter_term[0].y += -sinp[0] * sp[0].x + sp[4].y * cosp[0]; counter_term[1].x += sinp[1] * sp[1].y + sp[4].x * cosp[1]; counter_term[1].y += -sinp[1] * sp[1].x + sp[4].y * cosp[1]; counter_term[2].x += sinp[2] * sp[2].y + sp[4].x * cosp[2]; counter_term[2].y += -sinp[2] * sp[2].x + sp[4].y * cosp[2]; counter_term[3].x += sinp[3] * sp[3].y + sp[4].x * cosp[3]; counter_term[3].y += -sinp[3] * sp[3].x + sp[4].y * cosp[3]; } */ // increase the coordinates 
i0,...,i3 i3 += 1; rest = (i3==L1); i3 -= rest*L1; i2 += rest; rest = (i2==L1); i2 -= rest*L1; i1 += rest; rest = (i1==L1); i1 -= rest*L1; i0 += rest; rest = (i0==devT); i0 -= rest*devT; } // loop on icount // normalisation ftmp = 0.25 * _NSPIN * _NCOLOR / ( (float)(devT) * (float)(L1) * (float)(L1) * (float)(L1) ); cvc_out[_GWI( 0,idx,V4) ] = -cvc_tmp[ 0].x*ftmp; cvc_out[_GWI( 0,idx,V4)+1] = -cvc_tmp[ 0].y*ftmp; cvc_out[_GWI( 1,idx,V4) ] = -cvc_tmp[ 1].x*ftmp; cvc_out[_GWI( 1,idx,V4)+1] = -cvc_tmp[ 1].y*ftmp; cvc_out[_GWI( 2,idx,V4) ] = -cvc_tmp[ 2].x*ftmp; cvc_out[_GWI( 2,idx,V4)+1] = -cvc_tmp[ 2].y*ftmp; cvc_out[_GWI( 3,idx,V4) ] = -cvc_tmp[ 3].x*ftmp; cvc_out[_GWI( 3,idx,V4)+1] = -cvc_tmp[ 3].y*ftmp; cvc_out[_GWI( 4,idx,V4) ] = -cvc_tmp[ 4].x*ftmp; cvc_out[_GWI( 4,idx,V4)+1] = -cvc_tmp[ 4].y*ftmp; cvc_out[_GWI( 5,idx,V4) ] = -cvc_tmp[ 5].x*ftmp; cvc_out[_GWI( 5,idx,V4)+1] = -cvc_tmp[ 5].y*ftmp; cvc_out[_GWI( 6,idx,V4) ] = -cvc_tmp[ 6].x*ftmp; cvc_out[_GWI( 6,idx,V4)+1] = -cvc_tmp[ 6].y*ftmp; cvc_out[_GWI( 7,idx,V4) ] = -cvc_tmp[ 7].x*ftmp; cvc_out[_GWI( 7,idx,V4)+1] = -cvc_tmp[ 7].y*ftmp; cvc_out[_GWI( 8,idx,V4) ] = -cvc_tmp[ 8].x*ftmp; cvc_out[_GWI( 8,idx,V4)+1] = -cvc_tmp[ 8].y*ftmp; cvc_out[_GWI( 9,idx,V4) ] = -cvc_tmp[ 9].x*ftmp; cvc_out[_GWI( 9,idx,V4)+1] = -cvc_tmp[ 9].y*ftmp; cvc_out[_GWI(10,idx,V4) ] = -cvc_tmp[10].x*ftmp; cvc_out[_GWI(10,idx,V4)+1] = -cvc_tmp[10].y*ftmp; cvc_out[_GWI(11,idx,V4) ] = -cvc_tmp[11].x*ftmp; cvc_out[_GWI(11,idx,V4)+1] = -cvc_tmp[11].y*ftmp; cvc_out[_GWI(12,idx,V4) ] = -cvc_tmp[12].x*ftmp; cvc_out[_GWI(12,idx,V4)+1] = -cvc_tmp[12].y*ftmp; cvc_out[_GWI(13,idx,V4) ] = -cvc_tmp[13].x*ftmp; cvc_out[_GWI(13,idx,V4)+1] = -cvc_tmp[13].y*ftmp; cvc_out[_GWI(14,idx,V4) ] = -cvc_tmp[14].x*ftmp; cvc_out[_GWI(14,idx,V4)+1] = -cvc_tmp[14].y*ftmp; cvc_out[_GWI(15,idx,V4) ] = -cvc_tmp[15].x*ftmp; cvc_out[_GWI(15,idx,V4)+1] = -cvc_tmp[15].y*ftmp; ftmp *= 4.; // if(idx==0) if(idx==102) { ct_out[0] = -counter_term[0].x * ftmp; ct_out[1] = -counter_term[0].y * ftmp; ct_out[2] = -counter_term[1].x * ftmp; ct_out[3] = -counter_term[1].y * ftmp; ct_out[4] = -counter_term[2].x * ftmp; ct_out[5] = -counter_term[2].y * ftmp; ct_out[6] = -counter_term[3].x * ftmp; ct_out[7] = -counter_term[3].y * ftmp; } } // of if idx < N } /********************************************************************** * main program **********************************************************************/ int main(int argc, char **argv) { // int status; int c, filename_set=0, verbose=0; int mu, nu, x0, x1, x2, x3, ix; int imu, inu, isigma_mu, isigma_nu, ikappa, ilambda; unsigned int threadsPerBlock, blocksPerGrid; int i; double delta_mn, delta_mk, delta_nk, delta_ml, delta_nl, delta_lk; double sigma_mu, sigma_nu; float cvc_coeff[2304], phase[4]; double *dptr = NULL; float *fptr = NULL; // const double CVC_EPS = 5.e-15; void *cvc=NULL, *counter_term; // float WI_check; float ftmp; complex w, w1; char filename[80], contype[200]; float *dev_cvc, *dev_ct; hipDeviceProp_t prop; while ((c = getopt(argc, argv, "h?f:v:")) != -1) { switch (c) { case 'f': strcpy(filename, optarg); filename_set=1; break; case 'v': verbose = atoi( optarg ); fprintf(stdout, "\n# [] using verbose mode %d\n", verbose); break; default: //usage(); break; } } /* get the time stamp */ g_the_time = time(NULL); /* set the default values */ if(filename_set==0) strcpy(filename, "cvc.input"); fprintf(stdout, "# Reading input from file %s\n", filename); read_input_parser(filename); T = T_global; L = LX; if(init_geometry() != 0) { 
fprintf(stderr, "ERROR from init_geometry\n"); exit(1); } geometry(); /*********************************************** * device properties ***********************************************/ HANDLE_ERROR( hipGetDevice(&c) ); HANDLE_ERROR(hipGetDeviceProperties(&prop, c) ); fprintf(stdout, "\n--- General info for device no. %d\n", c); fprintf(stdout, "Name: %s\n", prop.name); fprintf(stdout, "Compute capability: %d.%d\n", prop.major, prop.minor); printf("Clock rate: %d\n", prop.clockRate); printf("Device copy overlap: "); if(prop.deviceOverlap) { printf("Enabled\n"); } else { printf("Disabled\n"); } printf("Kernel execution timeout: "); if(prop.kernelExecTimeoutEnabled) { printf("Enabled\n"); } else { printf("Disabled\n"); } printf("\n--- Memory info for device no. %d\n", c); printf("Total global mem: %ld\n", prop.totalGlobalMem); printf("Total constant mem: %ld\n", prop.totalConstMem); printf("Max mem pitch: %ld\n", prop.memPitch); printf("Texture alignment: %ld\n", prop.textureAlignment); printf("\n--- MP info for device no. %d\n", c); printf("Multiprocessor count: %d\n", prop.multiProcessorCount); printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock); printf("Registers mem per mp: %d\n", prop.regsPerBlock); printf("Threads in warp: %d\n", prop.warpSize); printf("Max threads per block: %d\n", prop.maxThreadsPerBlock); printf("Max thread dimension: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("Max grid dimension: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("\n\n"); /*********************************************** * set number of threads and blocks ***********************************************/ threadsPerBlock = THREADS_PER_BLOCK; blocksPerGrid = (VOLUME + threadsPerBlock-1)/threadsPerBlock; fprintf(stdout, "\n# [contractions] number threads per block: %u\n", threadsPerBlock); fprintf(stdout, "\n# [contractions] number blocks per grid : %u\n", blocksPerGrid); // allocate memory for cvc cvc = calloc( 32*VOLUME, sizeof(double) ); counter_term = calloc( 8, sizeof(double) ); if( cvc == NULL || counter_term==NULL) { fprintf(stderr, "\nError, could not alloc cvc\n"); exit(2); } /*************************** * initialize on host ***************************/ for(imu=0;imu<2304;imu++) cvc_coeff[imu] = 0.; // set the coefficients for the correlation functions for(imu=0; imu<4;imu++) { for(inu=0; inu<4;inu++) { delta_mn = (float)(imu==inu); for(isigma_mu=0; isigma_mu<2;isigma_mu++) { for(isigma_nu=0; isigma_nu<2;isigma_nu++) { sigma_mu = 2.*isigma_mu-1.; sigma_nu = 2.*isigma_nu-1.; // C_4_4 cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, 4, 4) ] = delta_mn + sigma_mu*sigma_nu; // C_4_5, C_5_4, C_l_5, C_5_k // all 0 // C_5_5 cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, 5, 5) ] = -delta_mn + sigma_mu*sigma_nu; // C_4_k for(ikappa=0;ikappa<4;ikappa++) { delta_mk = (float)( imu == ikappa) ; delta_nk = (float)( inu == ikappa ); cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, 4, ikappa) ] = delta_mk*sigma_nu + delta_nk*sigma_mu; } // C_l_4 for( ilambda=0; ilambda<4;ilambda++) { delta_ml = (float)(imu==ilambda); delta_nl = (float)(inu==ilambda); cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 4) ] = delta_ml*sigma_nu + delta_nl*sigma_mu; } // C_l_k for(ilambda=0; ilambda<4;ilambda++) { for(ikappa=0; ikappa<4; ikappa++ ) { //************************************* //************************************* delta_ml = (float)(imu==ilambda); // 
************************************ delta_mk = (float)(imu==ikappa); // ************************************ // ************************************ delta_nl = (float)(inu==ilambda); // ************************************ delta_nk = (float)(inu==ikappa); // ************************************ // ************************************ delta_lk = (float)(ilambda==ikappa); // ************************************ // ************************************ cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, ikappa) ] = \ delta_ml*delta_nk - delta_mn*delta_lk + delta_mk*delta_nl + delta_lk*sigma_mu*sigma_nu; }} }} // of isigma_mu, isigma_nu }} // of imu, inu /************************************************************************** * test: print the matrix cvc_coeff **************************************************************************/ if(verbose > 0) { for(imu=0;imu<4;imu++) { for(inu=0;inu<4;inu++) { fprintf(stdout, "# ---------------------------------------------------------------\n"); fprintf(stdout, "# imu = %d; inu = %d\n", imu, inu); for(isigma_mu=0;isigma_mu<2;isigma_mu++) { for(isigma_nu=0;isigma_nu<2;isigma_nu++) { fprintf(stdout, "# ---------------------------------------------------------------\n"); fprintf(stdout, "#\t sigma_mu = %e; sigma_nu = %e\n", 2.*isigma_mu-1., 2.*isigma_nu-1.); for(ilambda=0;ilambda<6;ilambda++) { fprintf(stdout, "%5.3f\t%5.3f\t%5.3f\t%5.3f\t%5.3f\t%5.3f\n", cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 0) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 1) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 2) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 3) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 4) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 5) ] ); } }} }} } /*************************************** * allocate fields, initialize on device ***************************************/ HANDLE_ERROR( hipMalloc(&dev_cvc, 32*VOLUME*sizeof(float)) ); HANDLE_ERROR( hipMalloc(&dev_ct, 8*sizeof(float)) ); HANDLE_ERROR( hipMemcpyToSymbol( "devT", &T, sizeof(int), 0, hipMemcpyHostToDevice) ); HANDLE_ERROR( hipMemcpyToSymbol( "devL", &L, sizeof(int), 0, hipMemcpyHostToDevice) ); ftmp = (float)g_mu; fprintf(stdout, "# [] using mu = %f\n", ftmp); HANDLE_ERROR( hipMemcpyToSymbol( "devMu", &ftmp, sizeof(float), 0, hipMemcpyHostToDevice) ); ftmp = (float)( 1. / (2. * g_kappa) - 4. 
); fprintf(stdout, "# [] using mq = %f\n", ftmp); HANDLE_ERROR( hipMemcpyToSymbol( "devMq", &ftmp, sizeof(float), 0, hipMemcpyHostToDevice) ); HANDLE_ERROR( hipMemcpyToSymbol( "dev_cvc_coeff", cvc_coeff, 2304*sizeof(float), 0, hipMemcpyHostToDevice) ); // HANDLE_ERROR( hipMemcpyToSymbol( dev_cvc_coeff, cvc_coeff, sizeof(cvc_coeff)) ); /************************* * call kernel *************************/ hipLaunchKernelGGL(( cvc_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_cvc, dev_ct, VOLUME); HANDLE_ERROR( hipMemcpy(cvc, dev_cvc, 32*VOLUME*sizeof(float), hipMemcpyDeviceToHost) ); HANDLE_ERROR( hipMemcpy(counter_term, dev_ct, 8*sizeof(float), hipMemcpyDeviceToHost) ); fprintf(stdout, "\n# [] float counter terms:\n"); fptr = (float*)counter_term; fprintf(stdout, "\t%d\t%f\t%f\n", 0, fptr[0], fptr[1]); fprintf(stdout, "\t%d\t%f\t%f\n", 1, fptr[2], fptr[3]); fprintf(stdout, "\t%d\t%f\t%f\n", 2, fptr[4], fptr[5]); fprintf(stdout, "\t%d\t%f\t%f\n", 3, fptr[6], fptr[7]); // cast to double precision dptr = (double*)cvc; fptr = (float*)cvc; /* for(ix=0;ix<VOLUME;ix++) { for(mu=0;mu<16;mu++) { fprintf(stdout, "%d\t%d\t%f\t%f\n", ix, mu, fptr[_GWI(mu,ix,VOLUME)], fptr[_GWI(mu,ix,VOLUME)+1]); } } */ for(i=32*VOLUME-1;i>=0;i--) dptr[i] = (double)fptr[i]; dptr = (double*)counter_term; fptr = (float*)counter_term; for(i=7;i>=0;i--) dptr[i] = (double)fptr[i]; /********************************************* * add phase factors, subtract counter term *********************************************/ #ifdef _UNDEF for(mu=0; mu<4; mu++) { double *phi = (double*)cvc + _GWI(5*mu,0,VOLUME); for(x0=0; x0<T; x0++) { phase[0] = 2. * (double)(x0) * M_PI / (double)T_global; for(x1=0; x1<LX; x1++) { phase[1] = 2. * (double)(x1) * M_PI / (double)LX; for(x2=0; x2<LY; x2++) { phase[2] = 2. * (double)(x2) * M_PI / (double)LY; for(x3=0; x3<LZ; x3++) { phase[3] = 2. 
* (double)(x3) * M_PI / (double)LZ; ix = g_ipt[x0][x1][x2][x3]; phi[2*ix ] = - ((double*)counter_term)[2*mu ]; phi[2*ix+1] = - ((double*)counter_term)[2*mu+1]; }}}} } /* of mu */ for(mu=0; mu<3; mu++) { for(nu=mu+1; nu<4; nu++) { double *phi = (double*)cvc + _GWI(4*mu+nu,0,VOLUME); double *chi = (double*)cvc + _GWI(4*nu+mu,0,VOLUME); for(x0=0; x0<T; x0++) { phase[0] = (double)(x0) * M_PI / (double)T_global; for(x1=0; x1<LX; x1++) { phase[1] = (double)(x1) * M_PI / (double)LX; for(x2=0; x2<LY; x2++) { phase[2] = (double)(x2) * M_PI / (double)LY; for(x3=0; x3<LZ; x3++) { phase[3] = (double)(x3) * M_PI / (double)LZ; ix = g_ipt[x0][x1][x2][x3]; w.re = cos( phase[mu] - phase[nu] ); w.im = sin( phase[mu] - phase[nu] ); _co_eq_co_ti_co(&w1,(complex*)( phi+2*ix ), &w); phi[2*ix ] = w1.re; phi[2*ix+1] = w1.im; w.re = cos( phase[nu] - phase[mu] ); w.im = sin( phase[nu] - phase[mu] ); _co_eq_co_ti_co(&w1,(complex*)( chi+2*ix ), &w); chi[2*ix ] = w1.re; chi[2*ix+1] = w1.im; }}}} }} /* of mu and nu */ #endif // write to file sprintf(filename, "pi_L%.2dT%.2d_mu%6.4f", L, T, g_mu); sprintf(contype, "tree-level vacuum polarization"); write_lime_contraction((double*)cvc, filename, 64, 16, contype, Nconf, 0); sprintf(filename, "pi_L%.2dT%.2d_mu%6.4f.ascii", L, T, g_mu); write_contraction((double*)cvc, NULL, filename, 16, 2, 0); dptr = (double*)counter_term; fprintf(stdout, "\n# [] counter terms:\n"); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[0], dptr[1]); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[2], dptr[3]); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[4], dptr[5]); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[6], dptr[7]); #ifdef _UNDEF #endif /************************************* * free and finalize *************************************/ if( cvc!=NULL ) free(cvc); if( counter_term!=NULL ) free(counter_term); hipFree( dev_cvc ); hipFree( dev_ct ); g_the_time = time(NULL); fprintf(stdout, "\n# [contractions] %s# [contractions] end of run\n", ctime(&g_the_time)); fprintf(stderr, "\n# [contractions] %s# [contractions] end of run\n", ctime(&g_the_time)); return(0); }
c73bc00dcaa6ef2b23514b67790bf11347ff9c76.cu
/********************************************************************************* * contractions.cu * * Sat Jul 2 11:19:43 CEST 2011 * * PURPOSE: * TODO: * DONE: * CHANGES: *********************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #ifdef MPI # include <mpi.h> # include <fftw_mpi.h> #else # include <fftw.h> #endif #include <getopt.h> #define MAIN_PROGRAM extern "C" { #include "cvc_complex.h" #include "cvc_linalg.h" #include "global.h" #include "cvc_geometry.h" #include "cvc_utils.h" #include "contractions_io.h" #include "read_input_parser.h" #include "contractions.h" } __constant__ int devT, devL; __constant__ float devMu, devMq; __constant__ float dev_cvc_coeff[2304]; /************************************************************* * the kernel for contract cvc *************************************************************/ __global__ void cvc_kernel (float*cvc_out, float*ct_out, unsigned int N) { int j0, j1, j2, j3, i0, i1, i2, i3; unsigned int L1, L2, L3, V4, imu, inu, icount, rest; // unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int isigma_mu, isigma_nu, ilambda, ikappa; float2 sp[6], sq[6]; float p[4], phalf[4], sinp[4], cosp[4], sinphalf[4], sinq[4], sinqhalf[4]; float q[4], qhalf[4], k[4]; // float khalf[4]; float2 phase[2][2]; float ftmp; float2 cvc_tmp[16], f2tmp, f2tmp2, counter_term[4]; float fTinv, fLinv; float aMp, aK2p, denomp; float aMq, aK2q, denomq; // get external momentum k from idx L1 = devL; L2 = L1 * L1; L3 = L2 * L1; V4 = L3 * devT; if(idx < N) { // initialize counter_term[0].x = 0.; counter_term[0].y = 0.; counter_term[1].x = 0.; counter_term[1].y = 0.; counter_term[2].x = 0.; counter_term[2].y = 0.; counter_term[3].x = 0.; counter_term[3].y = 0.; ftmp = -3.; cvc_tmp[ 0].x = ftmp; cvc_tmp[ 0].y = ftmp; cvc_tmp[ 1].x = ftmp; cvc_tmp[ 1].y = ftmp; cvc_tmp[ 2].x = ftmp; cvc_tmp[ 2].y = ftmp; cvc_tmp[ 3].x = ftmp; cvc_tmp[ 3].y = ftmp; cvc_tmp[ 4].x = ftmp; cvc_tmp[ 4].y = ftmp; cvc_tmp[ 5].x = ftmp; cvc_tmp[ 5].y = ftmp; cvc_tmp[ 6].x = ftmp; cvc_tmp[ 6].y = ftmp; cvc_tmp[ 7].x = ftmp; cvc_tmp[ 7].y = ftmp; cvc_tmp[ 8].x = ftmp; cvc_tmp[ 8].y = ftmp; cvc_tmp[ 9].x = ftmp; cvc_tmp[ 9].y = ftmp; cvc_tmp[10].x = ftmp; cvc_tmp[10].y = ftmp; cvc_tmp[11].x = ftmp; cvc_tmp[11].y = ftmp; cvc_tmp[12].x = ftmp; cvc_tmp[12].y = ftmp; cvc_tmp[13].x = ftmp; cvc_tmp[13].y = ftmp; cvc_tmp[14].x = ftmp; cvc_tmp[14].y = ftmp; cvc_tmp[15].x = ftmp; cvc_tmp[15].y = ftmp; ftmp = -(float)idx; cvc_out[_GWI( 0,idx,V4) ] = ftmp; cvc_out[_GWI( 0,idx,V4)+1] = ftmp; cvc_out[_GWI( 1,idx,V4) ] = ftmp; cvc_out[_GWI( 1,idx,V4)+1] = ftmp; cvc_out[_GWI( 2,idx,V4) ] = ftmp; cvc_out[_GWI( 2,idx,V4)+1] = ftmp; cvc_out[_GWI( 3,idx,V4) ] = ftmp; cvc_out[_GWI( 3,idx,V4)+1] = ftmp; cvc_out[_GWI( 4,idx,V4) ] = ftmp; cvc_out[_GWI( 4,idx,V4)+1] = ftmp; cvc_out[_GWI( 5,idx,V4) ] = ftmp; cvc_out[_GWI( 5,idx,V4)+1] = ftmp; cvc_out[_GWI( 6,idx,V4) ] = ftmp; cvc_out[_GWI( 6,idx,V4)+1] = ftmp; cvc_out[_GWI( 7,idx,V4) ] = ftmp; cvc_out[_GWI( 7,idx,V4)+1] = ftmp; cvc_out[_GWI( 8,idx,V4) ] = ftmp; cvc_out[_GWI( 8,idx,V4)+1] = ftmp; cvc_out[_GWI( 9,idx,V4) ] = ftmp; cvc_out[_GWI( 9,idx,V4)+1] = ftmp; cvc_out[_GWI(10,idx,V4) ] = ftmp; cvc_out[_GWI(10,idx,V4)+1] = ftmp; cvc_out[_GWI(11,idx,V4) ] = ftmp; cvc_out[_GWI(11,idx,V4)+1] = ftmp; cvc_out[_GWI(12,idx,V4) ] = ftmp; cvc_out[_GWI(12,idx,V4)+1] = ftmp; cvc_out[_GWI(13,idx,V4) ] = ftmp; 
cvc_out[_GWI(13,idx,V4)+1] = ftmp; cvc_out[_GWI(14,idx,V4) ] = ftmp; cvc_out[_GWI(14,idx,V4)+1] = ftmp; cvc_out[_GWI(15,idx,V4) ] = ftmp; cvc_out[_GWI(15,idx,V4)+1] = ftmp; j0 = idx / L3; icount = idx - L3*j0; j1 = icount / L2; icount = icount - L2*j1; j2 = icount / L1; j3 = icount - j2*L1; fTinv = 2. * _PI / (float)( devT ); fLinv = 2. * _PI / (float)( devL ); k[0] = (float)(j0) * fTinv; k[1] = (float)(j1) * fLinv; k[2] = (float)(j2) * fLinv; k[3] = (float)(j3) * fLinv; /* khalf[0] = 0.5 * k[0]; khalf[1] = 0.5 * k[1]; khalf[2] = 0.5 * k[2]; khalf[3] = 0.5 * k[3]; */ if(idx==102) { counter_term[0].x = fTinv; counter_term[0].y = fLinv; counter_term[1].x = k[0]; counter_term[1].y = k[1]; counter_term[2].x = k[2]; counter_term[2].y = k[3]; counter_term[3].x = (float)N; counter_term[3].y = (float)V4; } // loop on internal momentum p (summation) i0=0; i1=0; i2=0; i3=0; for(icount=0; icount<V4; icount++) { p[0] = ((float)(i0) + 0.5) * fTinv; phalf[0] = p[0] * 0.5; q[0] = ( ( (float)(i0) + (float)(j0) ) + 0.5 ) * fTinv; qhalf[0] = q[0] * 0.5; sinp[0] = sin( p[0] ); cosp[0] = cos( p[0] ); sinphalf[0] = sin( phalf[0] ); sinq[0] = sin( q[0] ); sinqhalf[0] = sin( qhalf[0] ); p[1] = (float)(i1) * fLinv; phalf[1] = p[1] * 0.5; q[1] = ( (float)(i1) + (float)(j1) ) * fLinv; qhalf[1] = q[1] * 0.5; sinp[1] = sin( p[1] ); cosp[1] = cos( p[1] ); sinphalf[1] = sin( phalf[1] ); sinq[1] = sin( q[1] ); sinqhalf[1] = sin( qhalf[1] ); p[2] = (float)(i2) * fLinv; phalf[2] = p[2] * 0.5; q[2] = ( (float)(i2) + (float)(j2) ) * fLinv; qhalf[2] = q[2] * 0.5; sinp[2] = sin( p[2] ); cosp[2] = cos( p[2] ); sinphalf[2] = sin( phalf[2] ); sinq[2] = sin( q[2] ); sinqhalf[2] = sin( qhalf[2] ); p[3] = (float)(i3) * fLinv; phalf[3] = p[3] * 0.5; q[3] = ( (float)(i3) + (float)(j3) ) * fLinv; qhalf[3] = q[3] * 0.5; sinp[3] = sin( p[3] ); cosp[3] = cos( p[3] ); sinphalf[3] = sin( phalf[3] ); sinq[3] = sin( q[3] ); sinqhalf[3] = sin( qhalf[3] ); aMp = devMq + 2. * (_SQR(sinphalf[0]) + _SQR(sinphalf[1]) + _SQR(sinphalf[2]) + _SQR(sinphalf[3])); aMq = devMq + 2. * (_SQR(sinqhalf[0]) + _SQR(sinqhalf[1]) + _SQR(sinqhalf[2]) + _SQR(sinqhalf[3])); aK2p = _SQR(sinp[0]) + _SQR(sinp[1]) + _SQR(sinp[2]) + _SQR(sinp[3]); aK2q = _SQR(sinq[0]) + _SQR(sinq[1]) + _SQR(sinq[2]) + _SQR(sinq[3]); denomp = 1. / ( aK2p + aMp*aMp + devMu*devMu ); denomq = 1. / ( aK2q + aMq*aMq + devMu*devMu ); sp[0].y = -sinp[0] * denomp; sp[1].y = -sinp[1] * denomp; sp[2].y = -sinp[2] * denomp; sp[3].y = -sinp[3] * denomp; sp[4].x = aMp * denomp; sp[5].y = -devMu * denomp; sq[0].y = -sinq[0] * denomq; sq[1].y = -sinq[1] * denomq; sq[2].y = -sinq[2] * denomq; sq[3].y = -sinq[3] * denomq; sq[4].x = aMq * denomq; sq[5].y = -devMu * denomq; _dev_set_phase(phase,p,k,0,0); cvc_tmp[0]. 
x += 1.; cvc_tmp[0].y += -2.; /* _cvc_accum( cvc_tmp[ 0], 0, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 0, 1 ); _cvc_accum( cvc_tmp[ 1], 0, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 0, 2 ); _cvc_accum( cvc_tmp[ 2], 0, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 0, 3 ); _cvc_accum( cvc_tmp[ 3], 0, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 1, 0 ); _cvc_accum( cvc_tmp[ 4], 1, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 1, 1 ); _cvc_accum( cvc_tmp[ 5], 1, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 1, 2 ); _cvc_accum( cvc_tmp[ 6], 1, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 1, 3 ); _cvc_accum( cvc_tmp[ 7], 1, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 2, 0 ); _cvc_accum( cvc_tmp[ 8], 2, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 2, 1 ); _cvc_accum( cvc_tmp[ 9], 2, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 2, 2 ); _cvc_accum( cvc_tmp[10], 2, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 2, 3 ); _cvc_accum( cvc_tmp[11], 2, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 3, 0 ); _cvc_accum( cvc_tmp[12], 3, 0, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 3, 1 ); _cvc_accum( cvc_tmp[13], 3, 1, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- _dev_set_phase( phase, p, k, 3, 2 ); _cvc_accum( cvc_tmp[14], 3, 2, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* _dev_set_phase( phase, p, k, 3, 3 ); _cvc_accum( cvc_tmp[15], 3, 3, dev_cvc_coeff, phase, sp, sq, f2tmp, f2tmp2 ); //-------------------------------------------------------------------- */ /* if( idx == 0 ){ counter_term[0].x += sinp[0] * sp[0].y + sp[4].x * cosp[0]; counter_term[0].y += -sinp[0] * sp[0].x + sp[4].y * cosp[0]; counter_term[1].x += sinp[1] * sp[1].y + sp[4].x * cosp[1]; counter_term[1].y += -sinp[1] * sp[1].x + sp[4].y * cosp[1]; counter_term[2].x += sinp[2] * sp[2].y + sp[4].x * cosp[2]; counter_term[2].y += -sinp[2] * sp[2].x + sp[4].y * cosp[2]; counter_term[3].x += sinp[3] * sp[3].y + sp[4].x * cosp[3]; counter_term[3].y += -sinp[3] * sp[3].x + sp[4].y * cosp[3]; } */ // increase the coordinates 
i0,...,i3 i3 += 1; rest = (i3==L1); i3 -= rest*L1; i2 += rest; rest = (i2==L1); i2 -= rest*L1; i1 += rest; rest = (i1==L1); i1 -= rest*L1; i0 += rest; rest = (i0==devT); i0 -= rest*devT; } // loop on icount // normalisation ftmp = 0.25 * _NSPIN * _NCOLOR / ( (float)(devT) * (float)(L1) * (float)(L1) * (float)(L1) ); cvc_out[_GWI( 0,idx,V4) ] = -cvc_tmp[ 0].x*ftmp; cvc_out[_GWI( 0,idx,V4)+1] = -cvc_tmp[ 0].y*ftmp; cvc_out[_GWI( 1,idx,V4) ] = -cvc_tmp[ 1].x*ftmp; cvc_out[_GWI( 1,idx,V4)+1] = -cvc_tmp[ 1].y*ftmp; cvc_out[_GWI( 2,idx,V4) ] = -cvc_tmp[ 2].x*ftmp; cvc_out[_GWI( 2,idx,V4)+1] = -cvc_tmp[ 2].y*ftmp; cvc_out[_GWI( 3,idx,V4) ] = -cvc_tmp[ 3].x*ftmp; cvc_out[_GWI( 3,idx,V4)+1] = -cvc_tmp[ 3].y*ftmp; cvc_out[_GWI( 4,idx,V4) ] = -cvc_tmp[ 4].x*ftmp; cvc_out[_GWI( 4,idx,V4)+1] = -cvc_tmp[ 4].y*ftmp; cvc_out[_GWI( 5,idx,V4) ] = -cvc_tmp[ 5].x*ftmp; cvc_out[_GWI( 5,idx,V4)+1] = -cvc_tmp[ 5].y*ftmp; cvc_out[_GWI( 6,idx,V4) ] = -cvc_tmp[ 6].x*ftmp; cvc_out[_GWI( 6,idx,V4)+1] = -cvc_tmp[ 6].y*ftmp; cvc_out[_GWI( 7,idx,V4) ] = -cvc_tmp[ 7].x*ftmp; cvc_out[_GWI( 7,idx,V4)+1] = -cvc_tmp[ 7].y*ftmp; cvc_out[_GWI( 8,idx,V4) ] = -cvc_tmp[ 8].x*ftmp; cvc_out[_GWI( 8,idx,V4)+1] = -cvc_tmp[ 8].y*ftmp; cvc_out[_GWI( 9,idx,V4) ] = -cvc_tmp[ 9].x*ftmp; cvc_out[_GWI( 9,idx,V4)+1] = -cvc_tmp[ 9].y*ftmp; cvc_out[_GWI(10,idx,V4) ] = -cvc_tmp[10].x*ftmp; cvc_out[_GWI(10,idx,V4)+1] = -cvc_tmp[10].y*ftmp; cvc_out[_GWI(11,idx,V4) ] = -cvc_tmp[11].x*ftmp; cvc_out[_GWI(11,idx,V4)+1] = -cvc_tmp[11].y*ftmp; cvc_out[_GWI(12,idx,V4) ] = -cvc_tmp[12].x*ftmp; cvc_out[_GWI(12,idx,V4)+1] = -cvc_tmp[12].y*ftmp; cvc_out[_GWI(13,idx,V4) ] = -cvc_tmp[13].x*ftmp; cvc_out[_GWI(13,idx,V4)+1] = -cvc_tmp[13].y*ftmp; cvc_out[_GWI(14,idx,V4) ] = -cvc_tmp[14].x*ftmp; cvc_out[_GWI(14,idx,V4)+1] = -cvc_tmp[14].y*ftmp; cvc_out[_GWI(15,idx,V4) ] = -cvc_tmp[15].x*ftmp; cvc_out[_GWI(15,idx,V4)+1] = -cvc_tmp[15].y*ftmp; ftmp *= 4.; // if(idx==0) if(idx==102) { ct_out[0] = -counter_term[0].x * ftmp; ct_out[1] = -counter_term[0].y * ftmp; ct_out[2] = -counter_term[1].x * ftmp; ct_out[3] = -counter_term[1].y * ftmp; ct_out[4] = -counter_term[2].x * ftmp; ct_out[5] = -counter_term[2].y * ftmp; ct_out[6] = -counter_term[3].x * ftmp; ct_out[7] = -counter_term[3].y * ftmp; } } // of if idx < N } /********************************************************************** * main program **********************************************************************/ int main(int argc, char **argv) { // int status; int c, filename_set=0, verbose=0; int mu, nu, x0, x1, x2, x3, ix; int imu, inu, isigma_mu, isigma_nu, ikappa, ilambda; unsigned int threadsPerBlock, blocksPerGrid; int i; double delta_mn, delta_mk, delta_nk, delta_ml, delta_nl, delta_lk; double sigma_mu, sigma_nu; float cvc_coeff[2304], phase[4]; double *dptr = NULL; float *fptr = NULL; // const double CVC_EPS = 5.e-15; void *cvc=NULL, *counter_term; // float WI_check; float ftmp; complex w, w1; char filename[80], contype[200]; float *dev_cvc, *dev_ct; cudaDeviceProp prop; while ((c = getopt(argc, argv, "h?f:v:")) != -1) { switch (c) { case 'f': strcpy(filename, optarg); filename_set=1; break; case 'v': verbose = atoi( optarg ); fprintf(stdout, "\n# [] using verbose mode %d\n", verbose); break; default: //usage(); break; } } /* get the time stamp */ g_the_time = time(NULL); /* set the default values */ if(filename_set==0) strcpy(filename, "cvc.input"); fprintf(stdout, "# Reading input from file %s\n", filename); read_input_parser(filename); T = T_global; L = LX; if(init_geometry() != 0) { 
fprintf(stderr, "ERROR from init_geometry\n"); exit(1); } geometry(); /*********************************************** * device properties ***********************************************/ HANDLE_ERROR( cudaGetDevice(&c) ); HANDLE_ERROR(cudaGetDeviceProperties(&prop, c) ); fprintf(stdout, "\n--- General info for device no. %d\n", c); fprintf(stdout, "Name: %s\n", prop.name); fprintf(stdout, "Compute capability: %d.%d\n", prop.major, prop.minor); printf("Clock rate: %d\n", prop.clockRate); printf("Device copy overlap: "); if(prop.deviceOverlap) { printf("Enabled\n"); } else { printf("Disabled\n"); } printf("Kernel execution timeout: "); if(prop.kernelExecTimeoutEnabled) { printf("Enabled\n"); } else { printf("Disabled\n"); } printf("\n--- Memory info for device no. %d\n", c); printf("Total global mem: %ld\n", prop.totalGlobalMem); printf("Total constant mem: %ld\n", prop.totalConstMem); printf("Max mem pitch: %ld\n", prop.memPitch); printf("Texture alignment: %ld\n", prop.textureAlignment); printf("\n--- MP info for device no. %d\n", c); printf("Multiprocessor count: %d\n", prop.multiProcessorCount); printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock); printf("Registers mem per mp: %d\n", prop.regsPerBlock); printf("Threads in warp: %d\n", prop.warpSize); printf("Max threads per block: %d\n", prop.maxThreadsPerBlock); printf("Max thread dimension: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("Max grid dimension: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("\n\n"); /*********************************************** * set number of threads and blocks ***********************************************/ threadsPerBlock = THREADS_PER_BLOCK; blocksPerGrid = (VOLUME + threadsPerBlock-1)/threadsPerBlock; fprintf(stdout, "\n# [contractions] number threads per block: %u\n", threadsPerBlock); fprintf(stdout, "\n# [contractions] number blocks per grid : %u\n", blocksPerGrid); // allocate memory for cvc cvc = calloc( 32*VOLUME, sizeof(double) ); counter_term = calloc( 8, sizeof(double) ); if( cvc == NULL || counter_term==NULL) { fprintf(stderr, "\nError, could not alloc cvc\n"); exit(2); } /*************************** * initialize on host ***************************/ for(imu=0;imu<2304;imu++) cvc_coeff[imu] = 0.; // set the coefficients for the correlation functions for(imu=0; imu<4;imu++) { for(inu=0; inu<4;inu++) { delta_mn = (float)(imu==inu); for(isigma_mu=0; isigma_mu<2;isigma_mu++) { for(isigma_nu=0; isigma_nu<2;isigma_nu++) { sigma_mu = 2.*isigma_mu-1.; sigma_nu = 2.*isigma_nu-1.; // C_4_4 cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, 4, 4) ] = delta_mn + sigma_mu*sigma_nu; // C_4_5, C_5_4, C_l_5, C_5_k // all 0 // C_5_5 cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, 5, 5) ] = -delta_mn + sigma_mu*sigma_nu; // C_4_k for(ikappa=0;ikappa<4;ikappa++) { delta_mk = (float)( imu == ikappa) ; delta_nk = (float)( inu == ikappa ); cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, 4, ikappa) ] = delta_mk*sigma_nu + delta_nk*sigma_mu; } // C_l_4 for( ilambda=0; ilambda<4;ilambda++) { delta_ml = (float)(imu==ilambda); delta_nl = (float)(inu==ilambda); cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 4) ] = delta_ml*sigma_nu + delta_nl*sigma_mu; } // C_l_k for(ilambda=0; ilambda<4;ilambda++) { for(ikappa=0; ikappa<4; ikappa++ ) { //************************************* //************************************* delta_ml = (float)(imu==ilambda); // 
************************************ delta_mk = (float)(imu==ikappa); // ************************************ // ************************************ delta_nl = (float)(inu==ilambda); // ************************************ delta_nk = (float)(inu==ikappa); // ************************************ // ************************************ delta_lk = (float)(ilambda==ikappa); // ************************************ // ************************************ cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, ikappa) ] = \ delta_ml*delta_nk - delta_mn*delta_lk + delta_mk*delta_nl + delta_lk*sigma_mu*sigma_nu; }} }} // of isigma_mu, isigma_nu }} // of imu, inu /************************************************************************** * test: print the matrix cvc_coeff **************************************************************************/ if(verbose > 0) { for(imu=0;imu<4;imu++) { for(inu=0;inu<4;inu++) { fprintf(stdout, "# ---------------------------------------------------------------\n"); fprintf(stdout, "# imu = %d; inu = %d\n", imu, inu); for(isigma_mu=0;isigma_mu<2;isigma_mu++) { for(isigma_nu=0;isigma_nu<2;isigma_nu++) { fprintf(stdout, "# ---------------------------------------------------------------\n"); fprintf(stdout, "#\t sigma_mu = %e; sigma_nu = %e\n", 2.*isigma_mu-1., 2.*isigma_nu-1.); for(ilambda=0;ilambda<6;ilambda++) { fprintf(stdout, "%5.3f\t%5.3f\t%5.3f\t%5.3f\t%5.3f\t%5.3f\n", cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 0) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 1) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 2) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 3) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 4) ], cvc_coeff[ _CVC_COEFF_IDX(imu, inu, isigma_mu, isigma_nu, ilambda, 5) ] ); } }} }} } /*************************************** * allocate fields, initialize on device ***************************************/ HANDLE_ERROR( cudaMalloc(&dev_cvc, 32*VOLUME*sizeof(float)) ); HANDLE_ERROR( cudaMalloc(&dev_ct, 8*sizeof(float)) ); HANDLE_ERROR( cudaMemcpyToSymbol( "devT", &T, sizeof(int), 0, cudaMemcpyHostToDevice) ); HANDLE_ERROR( cudaMemcpyToSymbol( "devL", &L, sizeof(int), 0, cudaMemcpyHostToDevice) ); ftmp = (float)g_mu; fprintf(stdout, "# [] using mu = %f\n", ftmp); HANDLE_ERROR( cudaMemcpyToSymbol( "devMu", &ftmp, sizeof(float), 0, cudaMemcpyHostToDevice) ); ftmp = (float)( 1. / (2. * g_kappa) - 4. 
); fprintf(stdout, "# [] using mq = %f\n", ftmp); HANDLE_ERROR( cudaMemcpyToSymbol( "devMq", &ftmp, sizeof(float), 0, cudaMemcpyHostToDevice) ); HANDLE_ERROR( cudaMemcpyToSymbol( "dev_cvc_coeff", cvc_coeff, 2304*sizeof(float), 0, cudaMemcpyHostToDevice) ); // HANDLE_ERROR( cudaMemcpyToSymbol( dev_cvc_coeff, cvc_coeff, sizeof(cvc_coeff)) ); /************************* * call kernel *************************/ cvc_kernel<<<blocksPerGrid, threadsPerBlock>>>(dev_cvc, dev_ct, VOLUME); HANDLE_ERROR( cudaMemcpy(cvc, dev_cvc, 32*VOLUME*sizeof(float), cudaMemcpyDeviceToHost) ); HANDLE_ERROR( cudaMemcpy(counter_term, dev_ct, 8*sizeof(float), cudaMemcpyDeviceToHost) ); fprintf(stdout, "\n# [] float counter terms:\n"); fptr = (float*)counter_term; fprintf(stdout, "\t%d\t%f\t%f\n", 0, fptr[0], fptr[1]); fprintf(stdout, "\t%d\t%f\t%f\n", 1, fptr[2], fptr[3]); fprintf(stdout, "\t%d\t%f\t%f\n", 2, fptr[4], fptr[5]); fprintf(stdout, "\t%d\t%f\t%f\n", 3, fptr[6], fptr[7]); // cast to double precision dptr = (double*)cvc; fptr = (float*)cvc; /* for(ix=0;ix<VOLUME;ix++) { for(mu=0;mu<16;mu++) { fprintf(stdout, "%d\t%d\t%f\t%f\n", ix, mu, fptr[_GWI(mu,ix,VOLUME)], fptr[_GWI(mu,ix,VOLUME)+1]); } } */ for(i=32*VOLUME-1;i>=0;i--) dptr[i] = (double)fptr[i]; dptr = (double*)counter_term; fptr = (float*)counter_term; for(i=7;i>=0;i--) dptr[i] = (double)fptr[i]; /********************************************* * add phase factors, subtract counter term *********************************************/ #ifdef _UNDEF for(mu=0; mu<4; mu++) { double *phi = (double*)cvc + _GWI(5*mu,0,VOLUME); for(x0=0; x0<T; x0++) { phase[0] = 2. * (double)(x0) * M_PI / (double)T_global; for(x1=0; x1<LX; x1++) { phase[1] = 2. * (double)(x1) * M_PI / (double)LX; for(x2=0; x2<LY; x2++) { phase[2] = 2. * (double)(x2) * M_PI / (double)LY; for(x3=0; x3<LZ; x3++) { phase[3] = 2. 
* (double)(x3) * M_PI / (double)LZ; ix = g_ipt[x0][x1][x2][x3]; phi[2*ix ] = - ((double*)counter_term)[2*mu ]; phi[2*ix+1] = - ((double*)counter_term)[2*mu+1]; }}}} } /* of mu */ for(mu=0; mu<3; mu++) { for(nu=mu+1; nu<4; nu++) { double *phi = (double*)cvc + _GWI(4*mu+nu,0,VOLUME); double *chi = (double*)cvc + _GWI(4*nu+mu,0,VOLUME); for(x0=0; x0<T; x0++) { phase[0] = (double)(x0) * M_PI / (double)T_global; for(x1=0; x1<LX; x1++) { phase[1] = (double)(x1) * M_PI / (double)LX; for(x2=0; x2<LY; x2++) { phase[2] = (double)(x2) * M_PI / (double)LY; for(x3=0; x3<LZ; x3++) { phase[3] = (double)(x3) * M_PI / (double)LZ; ix = g_ipt[x0][x1][x2][x3]; w.re = cos( phase[mu] - phase[nu] ); w.im = sin( phase[mu] - phase[nu] ); _co_eq_co_ti_co(&w1,(complex*)( phi+2*ix ), &w); phi[2*ix ] = w1.re; phi[2*ix+1] = w1.im; w.re = cos( phase[nu] - phase[mu] ); w.im = sin( phase[nu] - phase[mu] ); _co_eq_co_ti_co(&w1,(complex*)( chi+2*ix ), &w); chi[2*ix ] = w1.re; chi[2*ix+1] = w1.im; }}}} }} /* of mu and nu */ #endif // write to file sprintf(filename, "pi_L%.2dT%.2d_mu%6.4f", L, T, g_mu); sprintf(contype, "tree-level vacuum polarization"); write_lime_contraction((double*)cvc, filename, 64, 16, contype, Nconf, 0); sprintf(filename, "pi_L%.2dT%.2d_mu%6.4f.ascii", L, T, g_mu); write_contraction((double*)cvc, NULL, filename, 16, 2, 0); dptr = (double*)counter_term; fprintf(stdout, "\n# [] counter terms:\n"); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[0], dptr[1]); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[2], dptr[3]); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[4], dptr[5]); fprintf(stdout, "\t%d\t%e\t%e\n", 0, dptr[6], dptr[7]); #ifdef _UNDEF #endif /************************************* * free and finalize *************************************/ if( cvc!=NULL ) free(cvc); if( counter_term!=NULL ) free(counter_term); cudaFree( dev_cvc ); cudaFree( dev_ct ); g_the_time = time(NULL); fprintf(stdout, "\n# [contractions] %s# [contractions] end of run\n", ctime(&g_the_time)); fprintf(stderr, "\n# [contractions] %s# [contractions] end of run\n", ctime(&g_the_time)); return(0); }
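/*********************************************************************************
 * Illustrative side note on the momentum summation loop in cvc_kernel above
 * (a minimal host-side sketch, not part of the original cvc code base; the
 * helper names advance_site, decompose_site and check_site_enumeration are
 * placeholders introduced here).  The kernel walks through all V4 = T*L^3
 * internal momenta by advancing the coordinates (i0,i1,i2,i3) like an odometer
 * with branchless carries, instead of decomposing the linear counter on every
 * iteration.  The sketch restates that increment next to the direct
 * decomposition (the same one the kernel applies to the external index idx),
 * so the two enumerations can be cross-checked on the host.
 *********************************************************************************/
static inline void advance_site(int *i0, int *i1, int *i2, int *i3, int T_, int L_) {
  int rest;
  *i3 += 1;
  rest = (*i3 == L_); *i3 -= rest * L_;  /* carry from i3 into i2 */
  *i2 += rest;
  rest = (*i2 == L_); *i2 -= rest * L_;  /* carry from i2 into i1 */
  *i1 += rest;
  rest = (*i1 == L_); *i1 -= rest * L_;  /* carry from i1 into i0 */
  *i0 += rest;
  rest = (*i0 == T_); *i0 -= rest * T_;  /* wrap around in the time direction */
}

static inline void decompose_site(unsigned int idx, int *i0, int *i1, int *i2, int *i3, int L_) {
  /* direct decomposition of a lexicographic index, as done for idx in the kernel */
  unsigned int L2_ = L_ * L_, L3_ = L2_ * L_, icount;
  *i0 = idx / L3_;    icount = idx - L3_ * (*i0);
  *i1 = icount / L2_; icount = icount - L2_ * (*i1);
  *i2 = icount / L_;  *i3 = icount - L_ * (*i2);
}

/* returns the number of mismatches between the two enumerations (0 expected) */
static inline int check_site_enumeration(int T_, int L_) {
  int i0 = 0, i1 = 0, i2 = 0, i3 = 0, j0, j1, j2, j3, nerr = 0;
  unsigned int V4_ = (unsigned int)(T_ * L_ * L_ * L_);
  for (unsigned int idx = 0; idx < V4_; idx++) {
    decompose_site(idx, &j0, &j1, &j2, &j3, L_);
    if (i0 != j0 || i1 != j1 || i2 != j2 || i3 != j3) nerr++;
    advance_site(&i0, &i1, &i2, &i3, T_, L_);   /* odometer step, as at the end of the kernel loop body */
  }
  return nerr;
}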
4138ee884d5aaa7f0e932dbd5fdb0bc7bde626c9.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime_api.h> #include <vector> #include <stdio.h> #include <vector_functions.h> #include <cmath> #include "isosurfaces_cuda/hdr/common.h" #include "isosurfaces_cuda/hdr/helper_cuda.h" #include <unistd.h> #include "isosurfaces_cuda/hdr/common.h" using namespace std; #ifdef DISABLE_PRINTF #define printf(fmt, ...) (0) #endif #define MAX_PER_RUN 1000000 __device__ __host__ float calculateMutualInformation(unsigned int* hist, const int sizeX, const int sizeY, const float numValues, unsigned int* colSums, unsigned int* rowSums) { float hX = 0; float hY = 0; float hXY = 0; for(int i = 0; i < sizeX; ++i) { for (int j = 0; j < sizeY; ++j) { if (hist[i * sizeY + j] > 0) { float pxy = hist[i * sizeY + j]; hXY -= pxy * logf(pxy);//__logf(x) For x in [0.5, 2], the maximum absolute error is 2-21.41, otherwise, the maximum ulp error is 3. } } if (colSums[i] > 0) { float px = colSums[i]; hX -= px * logf(px); } if (rowSums[i] > 0) { float py = rowSums[i]; hY -= py * logf(py); } } hXY = hXY/numValues+logf(numValues); hX = hX/numValues+logf(numValues); hY = hY/numValues+logf(numValues); float iXY = hX + hY - hXY; float val = 2 * iXY/(hX + hY); return val != val ? 0.0f : val; } __device__ __host__ int findBucket_d(const float val, const float minValue, const float step, const int histogramSize, int id1, int id2) { if (step == 0) { return 0; } int bucket = (int) ((val - minValue) / step); if(bucket < 0) { printf("%d, %d: bucket: %d, %0.2f, %0.2f, %0.2f\n", id1, id2, bucket, val, minValue, step); } return bucket >= histogramSize ? histogramSize - 1 : bucket; } struct runInfo { unsigned long tasksPerRun; unsigned long lastRunMemBytes; unsigned long histMemMax; unsigned long offsetMax; unsigned long memoryPerRun; bool odd; unsigned long maxSize; unsigned long fieldMaxMemory; bool fieldsLastRun; unsigned long fieldsLastRunBytes; bool multi; }; struct dataInfo { const int fieldSize; const int histogramSize; const int numFields; const int similarityMapSize; }; __global__ void calculate_histogram_g(float *distanceFields, float *minValues, float *maxValues, unsigned int *jointHist, unsigned int *colSums, unsigned int *rowSums, int2 *tasks, const unsigned long offset, const unsigned long fieldOffset, float* simMap, runInfo rInfo, dataInfo dInfo) { int idx = threadIdx.x + blockIdx.x * blockDim.x; bool last = false; if (rInfo.odd && offset + 1 == rInfo.offsetMax) { last = true; ///this is necessary to see if we have a single run as last run due to memory limitations (and uneven numbers) } if (idx < rInfo.tasksPerRun && (!last || idx < 1)) { const int fieldSize = dInfo.fieldSize; const int histogramSize = dInfo.histogramSize; const int numFields = dInfo.numFields; unsigned long maxSize = rInfo.maxSize; int2 task = tasks[idx + offset * rInfo.tasksPerRun]; ///get data int id1 = task.x; int id2 = task.y; unsigned long field1Id = static_cast<unsigned long>(id1 * MAX_PER_RUN); unsigned long field2Id = static_cast<unsigned long>(id2 * MAX_PER_RUN); float *f1 = &distanceFields[field1Id]; float *f2 = &distanceFields[field2Id]; ///preparation for bucket selection float min = minValues[id1] < minValues[id2] ? minValues[id1] : minValues[id2]; float max = maxValues[id1] > maxValues[id2] ? maxValues[id1] : maxValues[id2]; float step = (max - min) / histogramSize; ///index calculations id2 = rInfo.multi ? 
id2 - numFields : id2; int idx1 = id1 * numFields * histogramSize * histogramSize; int idx2 = id2 * histogramSize * histogramSize; int histIndex = idx1 + idx2; int colRowIndex = id1 * numFields * histogramSize + id2 * histogramSize; unsigned long j; unsigned long offs = rInfo.fieldsLastRun ? rInfo.fieldsLastRunBytes : MAX_PER_RUN; for (j = 0; j < offs && j < fieldSize; ++j) { int row = findBucket_d(f1[j], min, step, histogramSize, id1, id2); int column = findBucket_d(f2[j], min, step, histogramSize, id1, id2); ++jointHist[(histIndex + row * histogramSize + column) % maxSize]; ++colSums[colRowIndex + column]; ++rowSums[colRowIndex + row]; } if (rInfo.fieldsLastRun && j == offs) { simMap[id1 * dInfo.similarityMapSize + id2] = calculateMutualInformation(&jointHist[histIndex % maxSize], histogramSize, histogramSize, fieldSize, &colSums[colRowIndex], &rowSums[colRowIndex]); } } } __host__ void calculate_histogram_CPU(float *distanceFields, float *minValues, float *maxValues, unsigned int *jointHist, unsigned int *colSums, unsigned int *rowSums, int2 *tasks, const int offset, const int fieldOffset, float* simMap, runInfo rInfo, dataInfo dInfo) { for (int idx = 0; idx < rInfo.tasksPerRun; ++idx) { bool last = false; if (rInfo.odd && offset + 1 == rInfo.offsetMax) { last = true; ///this is necessary to see if we have a single run as last run due to memory limitations (and uneven numbers) } if (idx < rInfo.tasksPerRun && (!last || idx < 1)) { const int fieldSize = dInfo.fieldSize; const int histogramSize = dInfo.histogramSize; const int numFields = dInfo.numFields; unsigned long maxSize = rInfo.maxSize; int2 task = tasks[idx + offset * rInfo.tasksPerRun]; ///get data int id1 = task.x; int id2 = task.y; int field1Id = id1 * MAX_PER_RUN; int field2Id = id2 * MAX_PER_RUN; float *f1 = &distanceFields[field1Id]; float *f2 = &distanceFields[field2Id]; ///preparation for bucket selection float min = minValues[id1] < minValues[id2] ? minValues[id1] : minValues[id2]; float max = maxValues[id1] > maxValues[id2] ? maxValues[id1] : maxValues[id2]; float step = (max - min) / histogramSize; ///index calculations id2 = rInfo.multi ? id2 - numFields : id2; int idx1 = id1 * numFields * histogramSize * histogramSize; int idx2 = id2 * histogramSize * histogramSize; int histIndex = idx1 + idx2; int colRowIndex = id1 * numFields * histogramSize + id2 * histogramSize; int j; unsigned long offs = rInfo.fieldsLastRun ? 
rInfo.fieldsLastRunBytes : MAX_PER_RUN; for (j = 0; j < offs && j < fieldSize; ++j) { int row = findBucket_d(f1[j], min, step, histogramSize, id1,id2); int column = findBucket_d(f2[j], min, step, histogramSize, id1, id2); ++jointHist[(histIndex + row * histogramSize + column) % maxSize]; ++colSums[colRowIndex + column]; ++rowSums[colRowIndex + row]; } if (rInfo.fieldsLastRun && j == offs) { simMap[id1 * dInfo.similarityMapSize + id2] = calculateMutualInformation(&jointHist[histIndex % maxSize], histogramSize, histogramSize, fieldSize, &colSums[colRowIndex], &rowSums[colRowIndex]); } } } } runInfo calculateRunInfo(const int numTasks, const unsigned long HISTOGRAM_MEMORY_SIZE, unsigned long NUM_HISTOGRAMS) { unsigned long offsetMax = 1; unsigned long histMemMax = HISTOGRAM_MEMORY_SIZE; unsigned long lastRunMemBytes = 0; if (HISTOGRAM_MEMORY_SIZE > MALLOC_MAX_BYTES) { while (++offsetMax * MALLOC_MAX_BYTES < HISTOGRAM_MEMORY_SIZE) {} histMemMax = HISTOGRAM_MEMORY_SIZE / offsetMax; } lastRunMemBytes = histMemMax; //we know we need 2 runs, to fit everything into the memory //now we check how many taskList per run we can do unsigned long tasksPerRun = numTasks / offsetMax; //check if we have an odd amount of runs bool odd = numTasks % 2 == 1 && offsetMax > 1; // if we have to do more than one run it's important to know if we have an odd amount. unsigned long memoryPerRun = histMemMax; unsigned long MEM_PER_HIST = HISTOGRAM_MEMORY_SIZE / NUM_HISTOGRAMS; if (odd) { memoryPerRun = histMemMax - MEM_PER_HIST / offsetMax; ++offsetMax; lastRunMemBytes = MEM_PER_HIST; } runInfo r = {tasksPerRun, lastRunMemBytes, histMemMax, offsetMax, memoryPerRun, odd, MAX_PER_RUN * sizeof(float)}; return r; } extern "C" __host__ void calculate_histogram_h(float **distanceFields, float *minValues, float *maxValues, const int fieldSize, const unsigned long numFields, const unsigned long histogramSize, unsigned int *h_jointHist, unsigned int *h_colSums, unsigned int *h_rowSums, float* h_simMap, const int simMapSize, const bool multi) { ///setup printf("calculate_histogram_h start\n"); printf("accolacting memory on gpu\n"); float *d_distanceFields; float *d_minValues; float *d_maxValues; unsigned int *d_jointHist; unsigned int *d_colSums; unsigned int *d_rowSums; float *d_simMap; unsigned long NUM_HISTOGRAMS = (numFields * numFields); unsigned long MINMAX_MEMORY_SIZE = numFields * sizeof(*d_minValues) * (multi ? 2 : 1); unsigned long HISTOGRAM_MEMORY_SIZE = numFields * numFields * histogramSize * histogramSize * sizeof(*d_jointHist); unsigned long SUMS_MEMORY_SIZE = numFields * numFields * histogramSize * sizeof(*d_colSums); unsigned long SIMMAP_MEMORY_SIZE = simMapSize * simMapSize * sizeof(*d_simMap); checkCudaErrors(hipMalloc((void **) &d_distanceFields, numFields * MAX_PER_RUN * sizeof(*d_distanceFields) * (multi ? 
2 : 1))); checkCudaErrors(hipMalloc((void **) &d_minValues, MINMAX_MEMORY_SIZE)); checkCudaErrors(hipMalloc((void **) &d_maxValues, MINMAX_MEMORY_SIZE)); checkCudaErrors(hipMalloc((void **) &d_colSums, SUMS_MEMORY_SIZE)); checkCudaErrors(hipMalloc((void **) &d_rowSums, SUMS_MEMORY_SIZE)); checkCudaErrors(hipMalloc((void **) &d_simMap, SIMMAP_MEMORY_SIZE)); checkCudaErrors(hipMemset(d_colSums, 0, SUMS_MEMORY_SIZE)); checkCudaErrors(hipMemset(d_rowSums, 0, SUMS_MEMORY_SIZE)); checkCudaErrors(hipMemcpy(d_minValues, minValues, MINMAX_MEMORY_SIZE, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_maxValues, maxValues, MINMAX_MEMORY_SIZE, hipMemcpyHostToDevice)); ///put into setup function printf("creating tasks\n"); size_t taskSize = numFields * numFields * sizeof(int2); int2 *taskList = (int2 *) malloc(taskSize); size_t c = 0; if (multi) { for (int i = 0; i < numFields; ++i) { for (int j = 0; j < numFields; ++j) { taskList[i * numFields + j] = make_int2(i, j+numFields); ++c; } } } else { for (int i = 0; i < numFields; ++i) { for (int j = 0; j < numFields; ++j) { taskList[c++] = make_int2(i, j); } } } size_t numTasks = c; int2 *d_tasks; taskSize = numTasks * sizeof(*d_tasks); checkCudaErrors(hipMalloc((void **) &d_tasks, taskSize)); checkCudaErrors(hipMemcpy(d_tasks, taskList, taskSize, hipMemcpyHostToDevice)); free(taskList); ///finding the loop rounds printf("getting runInfo\n"); runInfo rInfo = calculateRunInfo(numTasks, HISTOGRAM_MEMORY_SIZE, NUM_HISTOGRAMS); rInfo.multi = multi; // int tasksPerRun = rInfo.tasksPerRun; unsigned long lastRunMemBytes = rInfo.lastRunMemBytes; unsigned long histMemMax = rInfo.histMemMax; unsigned long offsetMax = rInfo.offsetMax; unsigned long memoryPerRun = rInfo.memoryPerRun; bool odd = rInfo.odd; rInfo.maxSize = memoryPerRun / sizeof(unsigned int); printf("memory per run: %lu\n", memoryPerRun); printf("getting data info\n"); dataInfo dInfo = {fieldSize, histogramSize, numFields, simMapSize}; checkCudaErrors(hipMalloc((void **) &d_jointHist, memoryPerRun)); printf("computing launch params\n"); int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, calculate_histogram_g, 0, 0); // Round up according to array size gridSize = (((int)rInfo.tasksPerRun) + blockSize - 1) / blockSize; size_t f, t; checkCudaErrors(hipMemGetInfo(&f, &t)); ///main task for (unsigned long offset = 0; offset < offsetMax; ++offset) { checkCudaErrors(hipMemset(d_jointHist, 0, memoryPerRun)); unsigned long fieldsBytes = MAX_PER_RUN * sizeof(float); rInfo.fieldsLastRun = false; for (unsigned long fieldOffset = 0; fieldOffset < fieldSize; fieldOffset += MAX_PER_RUN) { if (fieldOffset + MAX_PER_RUN >= fieldSize) { rInfo.fieldsLastRun = true; rInfo.fieldsLastRunBytes = (unsigned long) (fieldSize % MAX_PER_RUN); fieldsBytes = rInfo.fieldsLastRunBytes * sizeof(float); } for (unsigned long i = 0; i < numFields*(multi ? 
2 : 1); ++i) { checkCudaErrors(hipMemcpy(&d_distanceFields[i*MAX_PER_RUN], &distanceFields[i][fieldOffset], fieldsBytes, hipMemcpyHostToDevice)); } calculate_histogram_g << < gridSize , blockSize >> > (d_distanceFields, d_minValues, d_maxValues, d_jointHist, d_colSums, d_rowSums, d_tasks, offset, fieldOffset, d_simMap, rInfo, dInfo); } int ix = (int) (offset * (memoryPerRun / sizeof(int))); unsigned long bytesToCopy = offset + 1 == offsetMax ? lastRunMemBytes : memoryPerRun; // checkCudaErrors(hipMemcpy(&h_jointHist[ix], d_jointHist, bytesToCopy, hipMemcpyDeviceToHost)); } ///cleanup checkCudaErrors(hipFree(d_jointHist)); checkCudaErrors(hipMemcpy(h_colSums, d_colSums, SUMS_MEMORY_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_rowSums, d_rowSums, SUMS_MEMORY_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_simMap, d_simMap, SIMMAP_MEMORY_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_distanceFields)); checkCudaErrors(hipFree(d_minValues)); checkCudaErrors(hipFree(d_maxValues)); checkCudaErrors(hipFree(d_colSums)); checkCudaErrors(hipFree(d_rowSums)); checkCudaErrors(hipFree(d_tasks)); checkCudaErrors(hipFree(d_simMap)); return; }
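/*
 * Reference note on calculateMutualInformation above (a host-side sketch only;
 * the helper name nmi_reference is a placeholder introduced here and is not
 * used elsewhere in this file).  The device function works directly on raw,
 * unnormalised counts: with counts c summing to N, it uses the identity
 *     H = -(1/N) * sum_c c*log(c) + log(N)
 * for the joint entropy and, assuming a square histogram, for the two marginal
 * entropies, then returns the normalised mutual information 2*I(X;Y)/(H(X)+H(Y)),
 * mapping NaN to 0 (NaN arises when every value falls into a single bin, so
 * that H(X)=H(Y)=0).  For two identical fields the joint histogram is diagonal
 * and the result is exactly 1 (provided more than one bin is occupied); for an
 * exactly independent (product) histogram it is 0.  The double-precision
 * restatement below can be used to spot-check the single-precision device
 * result on small histograms; it relies on <cmath>, which is already included.
 */
static inline double nmi_reference(const unsigned int *hist, int size, double numValues,
                                   const unsigned int *colSums, const unsigned int *rowSums) {
    double hX = 0.0, hY = 0.0, hXY = 0.0;
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            if (hist[i * size + j] > 0) {                       // joint entropy from raw counts
                double pxy = (double) hist[i * size + j];
                hXY -= pxy * log(pxy);
            }
        }
        if (colSums[i] > 0) { double px = (double) colSums[i]; hX -= px * log(px); }   // marginal sums, one axis
        if (rowSums[i] > 0) { double py = (double) rowSums[i]; hY -= py * log(py); }   // marginal sums, other axis
    }
    hXY = hXY / numValues + log(numValues);
    hX  = hX  / numValues + log(numValues);
    hY  = hY  / numValues + log(numValues);
    double iXY = hX + hY - hXY;                                 // mutual information I(X;Y)
    double val = 2.0 * iXY / (hX + hY);
    return val != val ? 0.0 : val;                              // NaN guard, as in the device code
}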
4138ee884d5aaa7f0e932dbd5fdb0bc7bde626c9.cu
#include <cuda.h> #include <device_launch_parameters.h> #include <cuda_runtime_api.h> #include <vector> #include <stdio.h> #include <vector_functions.h> #include <cmath> #include "isosurfaces_cuda/hdr/common.h" #include "isosurfaces_cuda/hdr/helper_cuda.h" #include <unistd.h> #include "isosurfaces_cuda/hdr/common.h" using namespace std; #ifdef DISABLE_PRINTF #define printf(fmt, ...) (0) #endif #define MAX_PER_RUN 1000000 __device__ __host__ float calculateMutualInformation(unsigned int* hist, const int sizeX, const int sizeY, const float numValues, unsigned int* colSums, unsigned int* rowSums) { float hX = 0; float hY = 0; float hXY = 0; for(int i = 0; i < sizeX; ++i) { for (int j = 0; j < sizeY; ++j) { if (hist[i * sizeY + j] > 0) { float pxy = hist[i * sizeY + j]; hXY -= pxy * logf(pxy);//__logf(x) For x in [0.5, 2], the maximum absolute error is 2-21.41, otherwise, the maximum ulp error is 3. } } if (colSums[i] > 0) { float px = colSums[i]; hX -= px * logf(px); } if (rowSums[i] > 0) { float py = rowSums[i]; hY -= py * logf(py); } } hXY = hXY/numValues+logf(numValues); hX = hX/numValues+logf(numValues); hY = hY/numValues+logf(numValues); float iXY = hX + hY - hXY; float val = 2 * iXY/(hX + hY); return val != val ? 0.0f : val; } __device__ __host__ int findBucket_d(const float val, const float minValue, const float step, const int histogramSize, int id1, int id2) { if (step == 0) { return 0; } int bucket = (int) ((val - minValue) / step); if(bucket < 0) { printf("%d, %d: bucket: %d, %0.2f, %0.2f, %0.2f\n", id1, id2, bucket, val, minValue, step); } return bucket >= histogramSize ? histogramSize - 1 : bucket; } struct runInfo { unsigned long tasksPerRun; unsigned long lastRunMemBytes; unsigned long histMemMax; unsigned long offsetMax; unsigned long memoryPerRun; bool odd; unsigned long maxSize; unsigned long fieldMaxMemory; bool fieldsLastRun; unsigned long fieldsLastRunBytes; bool multi; }; struct dataInfo { const int fieldSize; const int histogramSize; const int numFields; const int similarityMapSize; }; __global__ void calculate_histogram_g(float *distanceFields, float *minValues, float *maxValues, unsigned int *jointHist, unsigned int *colSums, unsigned int *rowSums, int2 *tasks, const unsigned long offset, const unsigned long fieldOffset, float* simMap, runInfo rInfo, dataInfo dInfo) { int idx = threadIdx.x + blockIdx.x * blockDim.x; bool last = false; if (rInfo.odd && offset + 1 == rInfo.offsetMax) { last = true; ///this is necessary to see if we have a single run as last run due to memory limitations (and uneven numbers) } if (idx < rInfo.tasksPerRun && (!last || idx < 1)) { const int fieldSize = dInfo.fieldSize; const int histogramSize = dInfo.histogramSize; const int numFields = dInfo.numFields; unsigned long maxSize = rInfo.maxSize; int2 task = tasks[idx + offset * rInfo.tasksPerRun]; ///get data int id1 = task.x; int id2 = task.y; unsigned long field1Id = static_cast<unsigned long>(id1 * MAX_PER_RUN); unsigned long field2Id = static_cast<unsigned long>(id2 * MAX_PER_RUN); float *f1 = &distanceFields[field1Id]; float *f2 = &distanceFields[field2Id]; ///preparation for bucket selection float min = minValues[id1] < minValues[id2] ? minValues[id1] : minValues[id2]; float max = maxValues[id1] > maxValues[id2] ? maxValues[id1] : maxValues[id2]; float step = (max - min) / histogramSize; ///index calculations id2 = rInfo.multi ? 
id2 - numFields : id2; int idx1 = id1 * numFields * histogramSize * histogramSize; int idx2 = id2 * histogramSize * histogramSize; int histIndex = idx1 + idx2; int colRowIndex = id1 * numFields * histogramSize + id2 * histogramSize; unsigned long j; unsigned long offs = rInfo.fieldsLastRun ? rInfo.fieldsLastRunBytes : MAX_PER_RUN; for (j = 0; j < offs && j < fieldSize; ++j) { int row = findBucket_d(f1[j], min, step, histogramSize, id1, id2); int column = findBucket_d(f2[j], min, step, histogramSize, id1, id2); ++jointHist[(histIndex + row * histogramSize + column) % maxSize]; ++colSums[colRowIndex + column]; ++rowSums[colRowIndex + row]; } if (rInfo.fieldsLastRun && j == offs) { simMap[id1 * dInfo.similarityMapSize + id2] = calculateMutualInformation(&jointHist[histIndex % maxSize], histogramSize, histogramSize, fieldSize, &colSums[colRowIndex], &rowSums[colRowIndex]); } } } __host__ void calculate_histogram_CPU(float *distanceFields, float *minValues, float *maxValues, unsigned int *jointHist, unsigned int *colSums, unsigned int *rowSums, int2 *tasks, const int offset, const int fieldOffset, float* simMap, runInfo rInfo, dataInfo dInfo) { for (int idx = 0; idx < rInfo.tasksPerRun; ++idx) { bool last = false; if (rInfo.odd && offset + 1 == rInfo.offsetMax) { last = true; ///this is necessary to see if we have a single run as last run due to memory limitations (and uneven numbers) } if (idx < rInfo.tasksPerRun && (!last || idx < 1)) { const int fieldSize = dInfo.fieldSize; const int histogramSize = dInfo.histogramSize; const int numFields = dInfo.numFields; unsigned long maxSize = rInfo.maxSize; int2 task = tasks[idx + offset * rInfo.tasksPerRun]; ///get data int id1 = task.x; int id2 = task.y; int field1Id = id1 * MAX_PER_RUN; int field2Id = id2 * MAX_PER_RUN; float *f1 = &distanceFields[field1Id]; float *f2 = &distanceFields[field2Id]; ///preparation for bucket selection float min = minValues[id1] < minValues[id2] ? minValues[id1] : minValues[id2]; float max = maxValues[id1] > maxValues[id2] ? maxValues[id1] : maxValues[id2]; float step = (max - min) / histogramSize; ///index calculations id2 = rInfo.multi ? id2 - numFields : id2; int idx1 = id1 * numFields * histogramSize * histogramSize; int idx2 = id2 * histogramSize * histogramSize; int histIndex = idx1 + idx2; int colRowIndex = id1 * numFields * histogramSize + id2 * histogramSize; int j; unsigned long offs = rInfo.fieldsLastRun ? 
rInfo.fieldsLastRunBytes : MAX_PER_RUN; for (j = 0; j < offs && j < fieldSize; ++j) { int row = findBucket_d(f1[j], min, step, histogramSize, id1,id2); int column = findBucket_d(f2[j], min, step, histogramSize, id1, id2); ++jointHist[(histIndex + row * histogramSize + column) % maxSize]; ++colSums[colRowIndex + column]; ++rowSums[colRowIndex + row]; } if (rInfo.fieldsLastRun && j == offs) { simMap[id1 * dInfo.similarityMapSize + id2] = calculateMutualInformation(&jointHist[histIndex % maxSize], histogramSize, histogramSize, fieldSize, &colSums[colRowIndex], &rowSums[colRowIndex]); } } } } runInfo calculateRunInfo(const int numTasks, const unsigned long HISTOGRAM_MEMORY_SIZE, unsigned long NUM_HISTOGRAMS) { unsigned long offsetMax = 1; unsigned long histMemMax = HISTOGRAM_MEMORY_SIZE; unsigned long lastRunMemBytes = 0; if (HISTOGRAM_MEMORY_SIZE > MALLOC_MAX_BYTES) { while (++offsetMax * MALLOC_MAX_BYTES < HISTOGRAM_MEMORY_SIZE) {} histMemMax = HISTOGRAM_MEMORY_SIZE / offsetMax; } lastRunMemBytes = histMemMax; //we know we need 2 runs, to fit everything into the memory //now we check how many taskList per run we can do unsigned long tasksPerRun = numTasks / offsetMax; //check if we have an odd amount of runs bool odd = numTasks % 2 == 1 && offsetMax > 1; // if we have to do more than one run it's important to know if we have an odd amount. unsigned long memoryPerRun = histMemMax; unsigned long MEM_PER_HIST = HISTOGRAM_MEMORY_SIZE / NUM_HISTOGRAMS; if (odd) { memoryPerRun = histMemMax - MEM_PER_HIST / offsetMax; ++offsetMax; lastRunMemBytes = MEM_PER_HIST; } runInfo r = {tasksPerRun, lastRunMemBytes, histMemMax, offsetMax, memoryPerRun, odd, MAX_PER_RUN * sizeof(float)}; return r; } extern "C" __host__ void calculate_histogram_h(float **distanceFields, float *minValues, float *maxValues, const int fieldSize, const unsigned long numFields, const unsigned long histogramSize, unsigned int *h_jointHist, unsigned int *h_colSums, unsigned int *h_rowSums, float* h_simMap, const int simMapSize, const bool multi) { ///setup printf("calculate_histogram_h start\n"); printf("accolacting memory on gpu\n"); float *d_distanceFields; float *d_minValues; float *d_maxValues; unsigned int *d_jointHist; unsigned int *d_colSums; unsigned int *d_rowSums; float *d_simMap; unsigned long NUM_HISTOGRAMS = (numFields * numFields); unsigned long MINMAX_MEMORY_SIZE = numFields * sizeof(*d_minValues) * (multi ? 2 : 1); unsigned long HISTOGRAM_MEMORY_SIZE = numFields * numFields * histogramSize * histogramSize * sizeof(*d_jointHist); unsigned long SUMS_MEMORY_SIZE = numFields * numFields * histogramSize * sizeof(*d_colSums); unsigned long SIMMAP_MEMORY_SIZE = simMapSize * simMapSize * sizeof(*d_simMap); checkCudaErrors(cudaMalloc((void **) &d_distanceFields, numFields * MAX_PER_RUN * sizeof(*d_distanceFields) * (multi ? 
2 : 1))); checkCudaErrors(cudaMalloc((void **) &d_minValues, MINMAX_MEMORY_SIZE)); checkCudaErrors(cudaMalloc((void **) &d_maxValues, MINMAX_MEMORY_SIZE)); checkCudaErrors(cudaMalloc((void **) &d_colSums, SUMS_MEMORY_SIZE)); checkCudaErrors(cudaMalloc((void **) &d_rowSums, SUMS_MEMORY_SIZE)); checkCudaErrors(cudaMalloc((void **) &d_simMap, SIMMAP_MEMORY_SIZE)); checkCudaErrors(cudaMemset(d_colSums, 0, SUMS_MEMORY_SIZE)); checkCudaErrors(cudaMemset(d_rowSums, 0, SUMS_MEMORY_SIZE)); checkCudaErrors(cudaMemcpy(d_minValues, minValues, MINMAX_MEMORY_SIZE, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_maxValues, maxValues, MINMAX_MEMORY_SIZE, cudaMemcpyHostToDevice)); ///put into setup function printf("creating tasks\n"); size_t taskSize = numFields * numFields * sizeof(int2); int2 *taskList = (int2 *) malloc(taskSize); size_t c = 0; if (multi) { for (int i = 0; i < numFields; ++i) { for (int j = 0; j < numFields; ++j) { taskList[i * numFields + j] = make_int2(i, j+numFields); ++c; } } } else { for (int i = 0; i < numFields; ++i) { for (int j = 0; j < numFields; ++j) { taskList[c++] = make_int2(i, j); } } } size_t numTasks = c; int2 *d_tasks; taskSize = numTasks * sizeof(*d_tasks); checkCudaErrors(cudaMalloc((void **) &d_tasks, taskSize)); checkCudaErrors(cudaMemcpy(d_tasks, taskList, taskSize, cudaMemcpyHostToDevice)); free(taskList); ///finding the loop rounds printf("getting runInfo\n"); runInfo rInfo = calculateRunInfo(numTasks, HISTOGRAM_MEMORY_SIZE, NUM_HISTOGRAMS); rInfo.multi = multi; // int tasksPerRun = rInfo.tasksPerRun; unsigned long lastRunMemBytes = rInfo.lastRunMemBytes; unsigned long histMemMax = rInfo.histMemMax; unsigned long offsetMax = rInfo.offsetMax; unsigned long memoryPerRun = rInfo.memoryPerRun; bool odd = rInfo.odd; rInfo.maxSize = memoryPerRun / sizeof(unsigned int); printf("memory per run: %lu\n", memoryPerRun); printf("getting data info\n"); dataInfo dInfo = {fieldSize, histogramSize, numFields, simMapSize}; checkCudaErrors(cudaMalloc((void **) &d_jointHist, memoryPerRun)); printf("computing launch params\n"); int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, calculate_histogram_g, 0, 0); // Round up according to array size gridSize = (((int)rInfo.tasksPerRun) + blockSize - 1) / blockSize; size_t f, t; checkCudaErrors(cudaMemGetInfo(&f, &t)); ///main task for (unsigned long offset = 0; offset < offsetMax; ++offset) { checkCudaErrors(cudaMemset(d_jointHist, 0, memoryPerRun)); unsigned long fieldsBytes = MAX_PER_RUN * sizeof(float); rInfo.fieldsLastRun = false; for (unsigned long fieldOffset = 0; fieldOffset < fieldSize; fieldOffset += MAX_PER_RUN) { if (fieldOffset + MAX_PER_RUN >= fieldSize) { rInfo.fieldsLastRun = true; rInfo.fieldsLastRunBytes = (unsigned long) (fieldSize % MAX_PER_RUN); fieldsBytes = rInfo.fieldsLastRunBytes * sizeof(float); } for (unsigned long i = 0; i < numFields*(multi ? 
2 : 1); ++i) { checkCudaErrors(cudaMemcpy(&d_distanceFields[i*MAX_PER_RUN], &distanceFields[i][fieldOffset], fieldsBytes, cudaMemcpyHostToDevice)); } calculate_histogram_g << < gridSize , blockSize >> > (d_distanceFields, d_minValues, d_maxValues, d_jointHist, d_colSums, d_rowSums, d_tasks, offset, fieldOffset, d_simMap, rInfo, dInfo); } int ix = (int) (offset * (memoryPerRun / sizeof(int))); unsigned long bytesToCopy = offset + 1 == offsetMax ? lastRunMemBytes : memoryPerRun; // checkCudaErrors(cudaMemcpy(&h_jointHist[ix], d_jointHist, bytesToCopy, cudaMemcpyDeviceToHost)); } ///cleanup checkCudaErrors(cudaFree(d_jointHist)); checkCudaErrors(cudaMemcpy(h_colSums, d_colSums, SUMS_MEMORY_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_rowSums, d_rowSums, SUMS_MEMORY_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_simMap, d_simMap, SIMMAP_MEMORY_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_distanceFields)); checkCudaErrors(cudaFree(d_minValues)); checkCudaErrors(cudaFree(d_maxValues)); checkCudaErrors(cudaFree(d_colSums)); checkCudaErrors(cudaFree(d_rowSums)); checkCudaErrors(cudaFree(d_tasks)); checkCudaErrors(cudaFree(d_simMap)); return; }
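/*
 * Note on the launch configuration used in calculate_histogram_h above
 * (an illustrative sketch only; example_kernel and launch_rounded_up are
 * placeholder names introduced here and appear nowhere else in this file).
 * The block size is taken from the CUDA occupancy calculator and the grid
 * size is then obtained by rounding the task count up to a whole number of
 * blocks -- the same (n + blockSize - 1) / blockSize pattern applied to
 * rInfo.tasksPerRun.  Threads past the end are discarded by the idx < n
 * guard, mirroring the idx < rInfo.tasksPerRun check in calculate_histogram_g.
 */
__global__ void example_kernel(int *out, int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < n) out[idx] = idx;              // one task per thread, excess threads idle
}

void launch_rounded_up(int *d_out, int n) {
    if (n <= 0) return;                       // avoid an empty launch
    int minGridSize = 0;                      // smallest grid that still reaches full occupancy
    int blockSize   = 0;                      // suggested threads per block for this kernel
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, example_kernel, 0, 0);
    int gridSize = (n + blockSize - 1) / blockSize;   // round up so all n tasks are covered
    example_kernel<<<gridSize, blockSize>>>(d_out, n);
    checkCudaErrors(cudaDeviceSynchronize());
}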
df430b30a673accec38b47b8cd63401015a1eb8b.hip
// !!! This is a file automatically generated by hipify!!! // CUDA solver for Rayleigh Plesset #include "bubbles.h" #include "bubble_CUDA.h" #include "bubble_CUDA_kernel.cuh" using namespace thrust; host_vector<double2> focalpoint; host_vector<double2> control; // Mathematical constants const double Pi_h = acos(-1.0); const double Pi4r3_h = Pi_h * 4.0/3.0; mixture_t mixture_h, mixture_htod; bubble_t bubbles_h, bubbles_htod; grid_gen grid_h, grid_htod; sigma_t sigma_h, sigma_htod; // Allocation variables pitch_sizes_t pitches; array_widths_t widths; // Array dimensions int j0m, j0n, i1m, j1m, i1n, j1n, i2m, j2m, i2n, j2n, m1Vol, m2Vol, v_xVol, v_yVol, E_xVol, E_yVol; int numBubbles = 0; // Diagnostic display message #ifdef _DEBUG_ void display(double data[], int xdim, int ydim, int num_lines, char *msg) { printf("%s\n", msg); for (double j = 0; j <= ydim - 1; j+= (ydim-1)/num_lines) { if (!j) { for (double i = 0; i <= xdim - 1; i += (xdim - 1)/num_lines) { printf("\t(%i)\t",(int)i); } printf("\n"); } printf("(%i)\t",(int)j); for (double i = 0; i <= i2m - 1; i += (i2m - 1)/num_lines) { printf("%4.2E\t", data[xdim * (int)j + (int)i]); } printf("\n"); } } #endif thrust::tuple<bool,double2,double2,double2> solve_bubbles(array_index_t *array_index, grid_t *grid_size, PML_t *PML, sim_params_t *sim_params, bub_params_t *bub_params, transducer_t *transducer, debug_t *debug, int save_function, thrust::tuple<bool, double2,double2,double2> pid_init) { // Variables needed for control structures int nstep = 0; double tstep = 0.0, tstepx = 0.0; int loop; double resimax; double s1, s2; int max_iter; // Data thread setup int pthread_count = 0; pthread_t save_thread[sim_params->NSTEPMAX/sim_params->DATA_SAVE]; pthread_attr_t pthread_custom_attr; output_plan_t *plan; pthread_attr_init(&pthread_custom_attr); plan = (output_plan_t *)malloc(sizeof(output_plan_t)); #ifdef _DEBUG_ // Clear terminal for output if (system("clear")) { exit(EXIT_FAILURE); } #endif // Set CUDA configuration setCUDAflags(); // Initialize CUDA streams int num_streams = 3; hipStream_t stream[num_streams]; for (int i = 0; i < num_streams; i++) { hipStreamCreate(&stream[i]); } // Initialize CUDA events hipEvent_t stop[num_streams]; for (int i = 0; i < num_streams; i++) { hipEventCreateWithFlags(&stop[i], hipEventBlockingSync); } // Mixture Parameters mix_params_t *mix_params = (mix_params_t*) calloc(1, sizeof(mix_params_t)); // Initialize Variables printf("Computing Simulation Variables..."); initialize_variables(grid_size, PML, sim_params, transducer, array_index, mix_params, bub_params); printf("\tdone\n"); // Initialize control system printf("Initializing PID controller..."); focalpoint.clear(); control.clear(); focalpoint.push_back(make_double2(0.0,0.0)); double2 pid_cumulative_err = make_double2(0.0,0.0); double2 pid_derivative_err = make_double2(0.0,0.0); if (transducer->pid) { if (thrust::get<0>(pid_init)) { control.push_back(get<1>(pid_init)); pid_cumulative_err = get<2>(pid_init); pid_derivative_err = get<3>(pid_init); } else if (transducer->init_control) { control.push_back(make_double2(0.0,transducer->init_control)); } else { control.push_back(transducer->fp); } } else { control.push_back(transducer->fp); } double control_dist = control[control.size()-1].y / 0.5 / sqrt(3.0); printf("\tdone\n"); // Initialize folders to save to printf("Preparing folders..."); initialize_folders(); printf("\t\t\tdone\n"); // Allocate memory on the device printf("Allocating Memory..."); initialize_CUDA_variables(grid_size, PML, sim_params, 
transducer, array_index, mix_params, bub_params); printf("\t\t\tdone\n"); // Initialization kernels printf("Running Initialization Kernels..."); // Update Bubble Index if (bub_params->enabled) { update_bubble_indices(stream, stop); } // Calculate the initial state void fraction if (bub_params->enabled) { calculate_void_fraction(mixture_htod, transducer, pitches, widths, stream, stop); } // Set f_gn and f_gm to f_g synchronize_void_fraction(mixture_htod, pitches, stream, stop); printf("\tdone\n"); // Link pointers in plan to simulation plan->mixture_h = mixture_h; plan->bubbles_h = bubbles_h; plan->array_index = array_index; plan->grid_size = grid_size; plan->sim_params = sim_params; plan->transducer = transducer; plan->debug = debug; /************************ * Main simulation loop * ************************/ printf("Running the simulation\t\t"); while (((sim_params->NSTEPMAX != 0) && (nstep < sim_params->NSTEPMAX)) || ((sim_params->TSTEPMAX != 0) && (tstep < sim_params->TSTEPMAX))) { nstep++; // Accurate time addition s1 = tstep; tstep = tstep + mix_params->dt + tstepx; s2 = tstep - s1; tstepx = mix_params->dt + tstepx - s2; hipMemcpyToSymbol(tstep_c, &tstep, sizeof(double)); hipMemcpyToSymbol(focal_point_c, &control[control.size()-1], sizeof(double2)); hipMemcpyToSymbol(focal_dist_c, &control_dist, sizeof(double)); checkCUDAError("Set timestamp"); // Store Bubble and Mixture, and predict void fraction store_variables(mixture_htod, bubbles_htod, pitches, widths, stream, stop); // Update mixture velocity calculate_velocity_field(widths, stream, stop); // Move the bubbles if (bub_params->enabled) { bubble_motion(bubbles_htod, widths, stream, stop); } // Calculate pressure resimax = calculate_pressure_field(mixture_h, mixture_htod, mix_params->P_inf, pitches, widths, stream, stop); // Subloop for solving Rayleigh Plesset equations if (bub_params->enabled) { loop = 0; while (resimax > 1.0e-7f) { loop++; // Find bubble pressure interpolate_bubble_pressure(widths, stream, stop); // Solve Rayleigh-Plesset Equations max_iter = max(solve_bubble_radii(bubbles_htod, stream, stop), max_iter); // Calculate Void Fraction calculate_void_fraction(mixture_htod, transducer, pitches, widths, stream, stop); // Calculate Pressure resimax = calculate_pressure_field(mixture_h, mixture_htod, mix_params->P_inf, pitches, widths, stream, stop); #ifdef _DEBUG_ printf("\033[A\033[2K"); printf("Simulation step %5i subloop %5i \t resimax = %4.2E, inner loop executed %i times.\n", nstep, loop, resimax, max_iter); #endif } } // Calculate mixture temperature calculate_temperature(mixture_htod, bub_params, pitches, widths, stream, stop); // Calculate mixture properties calculate_properties(widths, stream, stop); #ifdef _DEBUG_ if (system("clear")) { exit(EXIT_FAILURE); } #else printf("\r"); #endif // Display progress if (sim_params->NSTEPMAX != 0) { printf("Running the simulation...\t\tnstep : %5i / %i\t", nstep, sim_params->NSTEPMAX); } else if (sim_params->TSTEPMAX !=0) { printf("Running the simulation...\t\ttstep : %4.2E / %4.2E\t", tstep, sim_params->TSTEPMAX); } focalpoint.push_back( make_double2( 0.0, filter_loop( determine_focal_point(mixture_htod.Work, i2m, widths.Work, j2m, grid_size->dx, grid_size->dy).y ) ) ); printf("focal point is (%8.6E, %8.6E)\t", focalpoint[focalpoint.size()-1].x, focalpoint[focalpoint.size()-1].y); if (transducer->pid) { if (nstep > transducer->pid_start_step) { control.push_back( focal_PID(transducer->fp, focalpoint[focalpoint.size()-1], focalpoint[focalpoint.size()-2], 
&pid_derivative_err, &pid_cumulative_err, mix_params->dt) ); control[control.size()-1] = make_double2( 0.0, clamp<double>( control[control.size()-1].y, transducer->fp.y, grid_size->LY ) ); control_dist = control[control.size()-1].y / 0.5 / sqrt(3.0); } printf("control focal point is (%+4.2E, %+4.2E)\t", control[control.size()-1].x, control[control.size()-1].y); } #ifdef _DEBUG_ printf("\n"); #else fflush(stdout); #endif // Save data at intervals if ((((int)nstep) % ((int)sim_params->DATA_SAVE) == 0)) { // Copy over requested variables if (debug->p0) hipMemcpy2D(mixture_h.p0, sizeof(double)*i1m, mixture_htod.p0, pitches.p0, sizeof(double)*i1m, j1m, hipMemcpyDeviceToHost); if (debug->fg) hipMemcpy2D(mixture_h.f_g, sizeof(double)*i2m, mixture_htod.f_g, pitches.f_g, sizeof(double)*i2m, j2m, hipMemcpyDeviceToHost); if (debug->T) hipMemcpy2D(mixture_h.T, sizeof(double)*i2m, mixture_htod.T, pitches.T, sizeof(double)*i2m, j2m, hipMemcpyDeviceToHost); if (debug->vxy) hipMemcpy2D(mixture_h.vx, sizeof(double)*i2n, mixture_htod.vx, pitches.vx, sizeof(double)*i2n, j2m, hipMemcpyDeviceToHost); if (debug->vxy) hipMemcpy2D(mixture_h.vy, sizeof(double)*i2m, mixture_htod.vy, pitches.vy, sizeof(double)*i2m, j2n, hipMemcpyDeviceToHost); if (debug->bubbles) hipMemcpy(bubbles_h.pos, bubbles_htod.pos, sizeof(double2)*numBubbles, hipMemcpyDeviceToHost); if (debug->bubbles) hipMemcpy(bubbles_h.R_t, bubbles_htod.R_t, sizeof(double)*numBubbles, hipMemcpyDeviceToHost); if (debug->bubbles) hipMemcpy(bubbles_h.PG_p, bubbles_htod.PG_p, sizeof(double)*numBubbles, hipMemcpyDeviceToHost); // Assign the data thread with saving the requested variables #ifdef _OUTPUT_ plan->step = nstep; plan->tstep = (float)tstep; if (save_function & sph) { pthread_create(&save_thread[pthread_count++], &pthread_custom_attr, save_sph, (void *)(plan)); } else if (save_function & ascii) { pthread_create(&save_thread[pthread_count++], &pthread_custom_attr, save_ascii, (void *)(plan)); } #endif #ifdef _DEBUG_ // Display the mixture field variables in square grids in the interactive terminal double num_lines = debug->display - 1; printf("resimax = %4.2E\n\n",resimax); if (debug->fg) { display(mixture_h.fg, i2m, j2m, num_lines, "fg Grid"); } if (debug->p0) { display(mixture_h.p0, i1m, j1m, num_lines, "p0 Grid"); } if (debug->T) { display(mixture_h.T, i2m, j2m, num_lines, "T Grid"); } if (debug->vxy) { display(mixture_h.vx, i2n, j2m, num_lines, "vx Grid"); display(mixture_h.vy, i2m, j2n, num_lines, "vy Grid"); } printf("\n\n"); #endif } } #ifdef _OUTPUT_ for (int i = 0; i < pthread_count; i++) { pthread_join(save_thread[i], NULL); } #endif #ifndef _DEBUG_ printf("\r"); #endif printf("Running the simulation...\t\tdone\n\n "); printf("Cleaning up..."); // Destroy the variables to prevent further errors if (destroy_CUDA_variables(bub_params)) { exit(EXIT_FAILURE); } for (int i = 0; i < num_streams; i++) { hipStreamDestroy(stream[i]); } for (int i = 0; i < num_streams; i++) { hipEventDestroy(stop[i]); } printf("\tdone\n\n"); return thrust::make_tuple( 1, control[control.size()-1], pid_cumulative_err, pid_derivative_err ); } // solve_bubbles() // Initialize simulation variables int initialize_variables(grid_t *grid_size, PML_t *PML, sim_params_t *sim_params, transducer_t *transducer, array_index_t *array_index, mix_params_t *mix_params, bub_params_t *bub_params) { *grid_size = init_grid_size(*grid_size); // Plane Wave *transducer = init_transducer(*transducer, *grid_size); // Array index *array_index = init_array(*grid_size, *sim_params); // Sigma for 
PML sigma_h = init_sigma(*PML, *sim_params, *grid_size, *array_index); // rxp and xu grid_h = init_grid_vector (*array_index, *grid_size); // Mixture *mix_params = init_mix(); mix_params->dt = mix_set_time_increment(*sim_params, min(grid_size->dx, grid_size->dy), mix_params->cs_inf); mixture_h = init_mix_array(mix_params, *array_index); // Bubbles if (bub_params->enabled) { *bub_params = init_bub_params(*bub_params, *sim_params, mix_params->dt); bubbles_h = init_bub_array(bub_params, mix_params, array_index, grid_size, transducer); } return 0; } // Initialize grid parameters grid_t init_grid_size(grid_t grid_size) { grid_size.dx = (double)grid_size.LX / (double)grid_size.X; grid_size.dy = (double)grid_size.LY / (double)grid_size.Y; grid_size.rdx = (double) 1.0 / (double)grid_size.dx; grid_size.rdy = (double) 1.0 / (double)grid_size.dy; #ifdef _DEBUG_ printf("Grid Size Parameters\n"); printf("dx = %E\tdy = %E\trdx = %E\trdy = %E\n\n", grid_size.dx, grid_size.dy, grid_size.rdx, grid_size.rdy); #endif return grid_size; } // Initialize plane wave coefficients transducer_t init_transducer(transducer_t transducer, grid_t grid_size) { if (transducer.f_dist) { transducer.fp.x = 0.0; transducer.fp.y = transducer.f_dist * 0.5 * sqrt(3.0); } else { transducer.fp.x = 0.0; transducer.fp.y = grid_size.LY * 0.5; } transducer.omega = 2.0 * acos(-1.0) * transducer.freq; return transducer; } // Initializes the array index array_index_t init_array(const grid_t grid_size, const sim_params_t sim_params) { array_index_t a; // a.lmax = (sim_params.deltaBand+1)*(sim_params.deltaBand+1); a.ms = -sim_params.order/2 + 1; a.me = sim_params.order + a.ms - 1; a.ns = -sim_params.order/2; a.ne = sim_params.order + a.ns - 1; a.istam = 1; a.iendm = grid_size.X; a.istan = a.istam - 1; a.iendn = a.iendm; a.ista1m = a.istan + a.ms; a.iend1m = a.iendn + a.me; a.ista1n = a.istam + a.ns; a.iend1n = a.iendm + a.ne; a.ista2m = a.ista1n + a.ms; a.iend2m = a.iend1n + a.me; a.ista2n = a.ista1m + a.ns; a.iend2n = a.iend1m + a.ne; a.jstam = 1; a.jendm = grid_size.Y; a.jstan = a.jstam - 1; a.jendn = a.jendm; a.jsta1m = a.jstan + a.ms; a.jend1m = a.jendn + a.me; a.jsta1n = a.jstam + a.ns; a.jend1n = a.jendm + a.ne; a.jsta2m = a.jsta1n + a.ms; a.jend2m = a.jend1n + a.me; a.jsta2n = a.jsta1m + a.ns; a.jend2n = a.jend1m + a.ne; #ifdef _DEBUG_ printf("Array Index\n"); // printf("lmax : %i\n", a.lmax); printf("ms : %i\t", a.ms); printf("me : %i\n", a.me); printf("ns : %i\t", a.ns); printf("ne : %i\n\n", a.ne); printf("istam : %i\t", a.istam); printf("iendm : %i\t\t", a.iendm); printf("istan : %i\t", a.istan); printf("iendn : %i\n", a.iendn); printf("jstam : %i\t", a.jstam); printf("jendm : %i\t\t", a.jendm); printf("jstan : %i\t", a.jstan); printf("jendn : %i\n", a.jendn); printf("ista1m : %i\t", a.ista1m); printf("iend1m : %i\t\t", a.iend1m); printf("ista1n : %i\t", a.ista1n); printf("iend1n : %i\n", a.iend1n); printf("jsta1m : %i\t", a.jsta1m); printf("jend1m : %i\t\t", a.jend1m); printf("jsta1n : %i\t", a.jsta1n); printf("jend1n : %i\n", a.jend1n); printf("ista2m : %i\t", a.ista2m); printf("iend2m : %i\t\t", a.iend2m); printf("ista2n : %i\t", a.ista2n); printf("iend2n : %i\n", a.iend2n); printf("jsta2m : %i\t", a.jsta2m); printf("jend2m : %i\t\t", a.jend2m); printf("jsta2n : %i\t", a.jsta2n); printf("jend2n : %i\n\n", a.jend2n); #endif //_DEBUG_ return a; } // init_array() // Initializes mixture parameters mix_params_t init_mix() { mix_params_t mix_params; mix_params.T_inf = 293.15; mix_params.P_inf = 0.1e6; mix_params.fg_inf = 1.0e-7; 
mix_params.rho_inf = density_water(mix_params.P_inf,mix_params.T_inf); mix_params.cs_inf = adiabatic_sound_speed_water(mix_params.P_inf,mix_params.T_inf); return mix_params; } // init_mix() // Set the mixture time step double mix_set_time_increment(sim_params_t sim_params, double dx_min, double u_max) { #ifdef _DEBUG_ printf("sim_params.cfl = %E\tdx_min = %E\tu_max = %E\n",sim_params.cfl, dx_min, u_max); printf("dt = %E\n\n",sim_params.cfl * dx_min / u_max); #endif //_DEBUG_ return sim_params.cfl * dx_min / u_max; } // mix_set_time_increment() // Initializes implicit bubble parameters bub_params_t init_bub_params(bub_params_t bub_params, sim_params_t sim_params, double dt0) { bub_params.R03 = bub_params.R0 * bub_params.R0 * bub_params.R0; bub_params.PG0 = bub_params.PL0 + 2.0 * bub_params.sig/bub_params.R0; bub_params.coeff_alpha = bub_params.gam * bub_params.PG0 * bub_params.R03 / (2.0 * (bub_params.gam - 1.0) * bub_params.T0 * bub_params.K0); bub_params.dt0 = 0.1 * dt0; bub_params.npi = 0; bub_params.mbs = -sim_params.deltaBand / 2 + 1; bub_params.mbe = sim_params.deltaBand + bub_params.mbs - 1; bub_params.nbs = -sim_params.deltaBand / 2; bub_params.nbe = sim_params.deltaBand + bub_params.nbs - 1; #ifdef _DEBUG_ printf("Bubble Parameters\n"); printf("PG0 = %E\tdt0 = %E\nmbs = %i\tmbe = %i\tnbs = %i\tnbe = %i\n\n", bub_params.PG0, bub_params.dt0, bub_params.mbs, bub_params.mbe, bub_params.nbs, bub_params.nbe); #endif // _DEBUG_ return bub_params; } // init_bub_params() // Initializes useful index variables grid_gen init_grid_vector (array_index_t array_index, grid_t grid_size) { grid_gen grid; grid.rxp = (double*) calloc((array_index.iend2m - array_index.ista2m + 1), sizeof(double)); grid.xu = (double*) calloc((array_index.iend2n - array_index.ista2n + 1), sizeof(double)); grid.rxp_size = (array_index.iend2m - array_index.ista2m + 1); grid.xu_size = (array_index.iend2n - array_index.ista2n + 1); for (int i = array_index.ista2m; i <= array_index.iend2m; i++) { grid.rxp[i - array_index.ista2m] = 1.0/(grid_size.dx * ((double)i - 0.5)); } for (int i = array_index.ista2n; i <= array_index.iend2n; i++) { grid.xu[i - array_index.ista2n] = ((double)i) * grid_size.dx; } return grid; } // init_grid_vector() // Initialize the host mixture array mixture_t init_mix_array(mix_params_t * mix_params, array_index_t array_index) { mixture_t mix; #ifdef _DEBUG_ printf("Mixing mixture...\n"); #endif int m1Vol = (array_index.iend1m - array_index.ista1m + 1) * (array_index.jend1m - array_index.jsta1m + 1); int m2Vol = (array_index.iend2m - array_index.ista2m + 1) * (array_index.jend2m - array_index.jsta2m + 1); int v_xVol = (array_index.iend2n - array_index.ista2n + 1) * (array_index.jend2m - array_index.jsta2m + 1); int v_yVol = (array_index.iend2m - array_index.ista2m + 1) * (array_index.jend2n - array_index.jsta2n + 1); int E_xVol = (array_index.iend1n - array_index.ista1n + 1) * (array_index.jend1m - array_index.jsta1m + 1); int E_yVol = (array_index.iend1m - array_index.ista1m + 1) * (array_index.jend1n - array_index.jsta1n + 1); hipHostMalloc((void**)&mix.T, m2Vol*sizeof(double)); hipHostMalloc((void**)&mix.p0, m1Vol*sizeof(double)); hipHostMalloc((void**)&mix.p, m1Vol*sizeof(double2)); hipHostMalloc((void**)&mix.pn, m1Vol*sizeof(double2)); hipHostMalloc((void**)&mix.c_sl, m1Vol*sizeof(double)); hipHostMalloc((void**)&mix.rho_m, m1Vol*sizeof(double)); hipHostMalloc((void**)&mix.rho_l, m1Vol*sizeof(double)); hipHostMalloc((void**)&mix.f_g, m2Vol*sizeof(double)); hipHostMalloc((void**)&mix.f_gn, 
m2Vol*sizeof(double)); hipHostMalloc((void**)&mix.f_gm, m2Vol*sizeof(double)); hipHostMalloc((void**)&mix.k_m, m2Vol*sizeof(double)); hipHostMalloc((void**)&mix.C_pm, m1Vol*sizeof(double)); hipHostMalloc((void**)&mix.Work, m2Vol*sizeof(double)); hipHostMalloc((void**)&mix.vx, v_xVol*sizeof(double)); hipHostMalloc((void**)&mix.vy, v_yVol*sizeof(double)); hipHostMalloc((void**)&mix.Ex, E_xVol*sizeof(double)); hipHostMalloc((void**)&mix.Ey, E_yVol*sizeof(double)); for (int i = 0; i < m1Vol; i++) { mix.p0[i] = 0.0; mix.p[i] = make_double2(0.0, 0.0); mix.pn[i] = make_double2(0.0, 0.0); mix.rho_m[i] = mix.rho_l[i] = density_water(mix_params->P_inf, mix_params->T_inf); mix.c_sl[i] = adiabatic_sound_speed_water(mix_params->P_inf, mix_params->T_inf); mix.C_pm[i] = specific_heat_water(mix_params->T_inf); mix.k_m[i] = thermal_conductivity_water(mix_params->T_inf); } for (int i = 0; i < v_xVol; i++) { mix.vx[i] = 0.0; } for (int i = 0; i < v_yVol; i++) { mix.vy[i] = 0.0; } for (int i = 0; i < E_xVol; i++) { mix.Ex[i] = 0.0; } for (int i = 0; i < E_yVol; i++) { mix.Ey[i] = 0.0; } for (int i = 0; i < m2Vol; i++) { mix.T[i] = 0.0; mix.f_g[i] = 0.0;//(double) i/m2Vol; mix.Work[i] = 0; } #ifdef _DEBUG_ printf("Mixture grid generated.\n\n"); #endif return mix; } // init_mix_array() // Initialize the host bubble array bubble_t init_bub_array(bub_params_t *bub_params, mix_params_t *mix_params, array_index_t *array_index, grid_t *grid_size, transducer_t *transducer) { double2 pos = make_double2(0.0, 0.0); host_vector<bubble_t_aos> bub; bubble_t_aos init_bubble; bubble_t ret_bub; #ifdef _DEBUG_ printf("Baking bubbles...\n"); #endif for (int i = array_index->istam; i <= array_index->iendm; i++) { pos.x = ( (double)i - 0.5) * grid_size->dx; for (int j = array_index->jstam; j <= array_index->jendm; j++) { pos.y = ( (double)j - 0.5) * grid_size->dy; if (transducer->box_size && (abs(pos.x - transducer->fp.x) < 0.5 * transducer->box_size) && (abs(pos.y - transducer->fp.y) < 0.5 * transducer->box_size)) { init_bubble = bubble_input(pos, bub_params->fg0, *bub_params, *grid_size, *transducer); bub.push_back(init_bubble); } else if (!(transducer->box_size)) { init_bubble = bubble_input(pos, bub_params->fg0, *bub_params, *grid_size, *transducer); bub.push_back(init_bubble); } } } numBubbles = bub_params->npi = bub.size(); hipHostMalloc((void**)&ret_bub.ibm, bub.size()*sizeof(int2)); hipHostMalloc((void**)&ret_bub.ibn, bub.size()*sizeof(int2)); hipHostMalloc((void**)&ret_bub.pos, bub.size()*sizeof(double2)); hipHostMalloc((void**)&ret_bub.R_t, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.R_p, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.R_pn, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.R_n, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.R_nn, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.d1_R_p, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.d1_R_n, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.PG_p, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.PG_n, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.PL_p, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.PL_n, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.PL_m, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.Q_B, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.n_B, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.dt, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.dt_n, 
bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.re, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.re_n, bub.size()*sizeof(double)); hipHostMalloc((void**)&ret_bub.v_B, bub.size()*sizeof(double2)); hipHostMalloc((void**)&ret_bub.v_L, bub.size()*sizeof(double2)); for (int i = 0; i < bub_params->npi; i++) { ret_bub.ibm[i] = bub[i].ibm; ret_bub.ibn[i] = bub[i].ibn; ret_bub.pos[i] = bub[i].pos; ret_bub.R_t[i] = bub[i].R_t; ret_bub.R_p[i] = bub[i].R_p; ret_bub.R_pn[i] = bub[i].R_pn; ret_bub.R_n[i] = bub[i].R_n; ret_bub.R_nn[i] = bub[i].R_nn; ret_bub.d1_R_p[i] = bub[i].d1_R_p; ret_bub.d1_R_n[i] = bub[i].d1_R_n; ret_bub.PG_p[i] = bub[i].PG_p; ret_bub.PG_n[i] = bub[i].PG_n; ret_bub.PL_p[i] = bub[i].PL_p; ret_bub.PL_n[i] = bub[i].PL_n; ret_bub.PL_m[i] = bub[i].PL_m; ret_bub.Q_B[i] = bub[i].Q_B; ret_bub.n_B[i] = bub[i].n_B; ret_bub.dt[i] = bub[i].dt; ret_bub.dt_n[i] = bub[i].dt_n; ret_bub.re[i] = bub[i].re; ret_bub.re_n[i] = bub[i].re_n; ret_bub.v_B[i] = bub[i].v_B; ret_bub.v_L[i] = bub[i].v_L; } #ifdef _DEBUG_ printf("%i bubbles initialized.\n\n", bub_params->npi); #endif return ret_bub; } // Create a new bubble object based on initial conditions bubble_t_aos bubble_input(double2 pos, double fg_in, bub_params_t bub_params, grid_t grid_size, transducer_t transducer) { bubble_t_aos new_bubble; double Pi = acos(-1.0); new_bubble.pos = pos; new_bubble.R_t = bub_params.R0; new_bubble.R_p = new_bubble.R_pn = bub_params.R0; new_bubble.R_n = new_bubble.R_nn = bub_params.R0; new_bubble.d1_R_p = new_bubble.d1_R_n = 0.0; new_bubble.PG_p = new_bubble.PG_n = bub_params.PG0; new_bubble.PL_p = new_bubble.PL_n = new_bubble.PL_m = 0.0; if (transducer.cylindrical) { new_bubble.n_B = fg_in * (pos.x * grid_size.dx * grid_size.dy) / (4.0 / 3.0 * Pi * pow(new_bubble.R_t,3)); } else { new_bubble.n_B = fg_in * (grid_size.dx * grid_size.dy) / (4.0 / 3.0 * Pi * pow(new_bubble.R_t,3)); } new_bubble.Q_B = 0.0; new_bubble.dt = new_bubble.dt_n = bub_params.dt0; new_bubble.re = new_bubble.re_n = 0.0; new_bubble.v_B = new_bubble.v_L = make_double2(0.0, 0.0); new_bubble.ibm = make_int2(0,0); new_bubble.ibn = make_int2(0,0); return new_bubble; } // Initializes the sigma field used for PML sigma_t init_sigma (const PML_t PML, const sim_params_t sim_params, const grid_t grid_size, const array_index_t array_index) { #ifdef _DEBUG_ printf("Generating a perfectly matched layer.\n"); #endif sigma_t sigma; sigma.mx = (double*) calloc((array_index.iend1m - array_index.ista1m + 1), sizeof(double)); sigma.my = (double*) calloc((array_index.jend1m - array_index.jsta1m + 1), sizeof(double)); sigma.nx = (double*) calloc((array_index.iend2n - array_index.ista2n + 1), sizeof(double)); sigma.ny = (double*) calloc((array_index.jend2n - array_index.jsta2n + 1), sizeof(double)); sigma.mx_size = (array_index.iend1m - array_index.ista1m + 1); sigma.my_size = (array_index.jend1m - array_index.jsta1m + 1); sigma.nx_size = (array_index.iend2n - array_index.ista2n + 1); sigma.ny_size = (array_index.jend2n - array_index.jsta2n + 1); int n; int itmps, itmpe, jtmps, jtmpe; double sigma_x_max, sigma_y_max; int npml = PML.NPML; double sig = PML.sigma; double order = PML.order; double dx = grid_size.dx; double dy = grid_size.dy; int nx = grid_size.X; int ny = grid_size.Y; int istam = array_index.istam; int iendm = array_index.iendm; int jstam = array_index.jstam; int jendm = array_index.jendm; int istan = array_index.istan; int iendn = array_index.iendn; int jstan = array_index.jstan; int jendn = array_index.jendn; int ista1m = 
array_index.ista1m; int jsta1m = array_index.jsta1m; int ista2n = array_index.ista2n; int jsta2n = array_index.jsta2n; int ms = array_index.ms; int me = array_index.me; int ns = array_index.ns; int ne = array_index.ne; if (PML.X0) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (istam <= npml) { itmps = max(istam, ms); itmpe = min(iendm, npml); #ifdef _DEBUG_ printf("Sigma mx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = npml - i + 1; sigma.mx[i - ista1m] = sigma_x_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.mx[i-ista1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.X1) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (nx - npml + 1 <= iendm) { itmps = max(istam, nx - npml + 1); itmpe = min(iendm, nx + me); #ifdef _DEBUG_ printf("Sigma mx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = i - nx + npml; sigma.mx[i - ista1m] = sigma_x_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.mx[i-ista1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y0) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (jstam <= npml) { jtmps = max(jstam, ms); jtmpe = min(jendm, npml); #ifdef _DEBUG_ printf("Sigma my :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = npml - j + 1; sigma.my[j - jsta1m] = sigma_y_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.my[j-jsta1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y1) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (ny - npml + 1 <= jendm) { jtmps = max(jstam, ny - npml + 1); jtmpe = min(jendm, ny + me); #ifdef _DEBUG_ printf("Sigma my :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = j - ny + npml; sigma.my[j - jsta1m] = sigma_y_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.my[j-jsta1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.X0) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (istan <= npml - 1) { itmps = max(istan, ms + ns); itmpe = min(iendn, npml - 1); #ifdef _DEBUG_ printf("Sigma nx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = npml - i; sigma.nx[i - ista2n] = sigma_x_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.nx[i-ista2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.X1) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (nx - npml + 1 <= iendn) { itmps = max(istan, nx - npml + 1); itmpe = min(iendn, nx + me + ne + 1); #ifdef _DEBUG_ printf("Sigma nx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = i - nx + npml; sigma.nx[i - ista2n] = sigma_x_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.nx[i-ista2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y0) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (jstan <= npml - 1) { jtmps = max(jstan, ms + ns); jtmpe = min(jendn, npml - 1); #ifdef _DEBUG_ printf("Sigma ny :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = npml - j; sigma.ny[j - jsta2n] = sigma_y_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.ny[j-jsta2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y1) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (ny - npml + 1 <= 
jendn) { jtmps = max(jstan, ny - npml + 1); jtmpe = min(jendn, ny + me + ne + 1); #ifdef _DEBUG_ printf("Sigma ny :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = j - ny + npml; sigma.ny[j - jsta2n] = sigma_y_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.ny[j-jsta2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } #ifdef _DEBUG_ printf("PML generated.\n\n"); #endif return sigma; } // init_sigma() // Set CUDA runtime flags void setCUDAflags() { CUDA_SAFE_CALL(hipSetDeviceFlags(hipDeviceScheduleYield)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(BubbleUpdateIndexKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(BubbleInterpolationScalarKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(BubbleInterpolationVelocityKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(BubbleRadiusKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(VoidFractionCylinderKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(VoidFractionReverseLookupKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(VFPredictionKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(VelocityKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(VelocityBoundaryKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(MixturePressureKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(MixtureBoundaryPressureKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(MixtureKMKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(BubbleHeatKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(MixtureEnergyKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(MixtureTemperatureKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(MixtureBoundaryTemperatureKernel, hipFuncCachePreferL1)); CUDA_SAFE_CALL(hipFuncSetCacheConfig(MixturePropertiesKernel, hipFuncCachePreferL1)); } // Allocate and copy variables on device memory int initialize_CUDA_variables(grid_t *grid_size, PML_t *PML, sim_params_t *sim_params, transducer_t *transducer, array_index_t *array_index, mix_params_t *mix_params, bub_params_t *bub_params) { j0m = array_index->jendm - array_index->jstam + 1; j0n = array_index->jendn - array_index->jstan + 1; i1m = array_index->iend1m - array_index->ista1m + 1; j1m = array_index->jend1m - array_index->jsta1m + 1; i1n = array_index->iend1n - array_index->ista1n + 1; j1n = array_index->jend1n - array_index->jsta1n + 1; i2m = array_index->iend2m - array_index->ista2m + 1; j2m = array_index->jend2m - array_index->jsta2m + 1; i2n = array_index->iend2n - array_index->ista2n + 1; j2n = array_index->jend2n - array_index->jsta2n + 1; m1Vol = i1m * j1m; m2Vol = i2m * j2m; v_xVol = i2n * j2m; v_yVol = i2m * j2n; E_xVol = i1n * j1m; E_yVol = i1m * j1n; hipMalloc( (void **)&mixture_htod, sizeof(mixture_t)); hipMallocPitch((void **)&mixture_htod.T, &pitches.T, sizeof(double)*i2m, j2m); hipMallocPitch((void **)&mixture_htod.vx, &pitches.vx, sizeof(double)*i2n, j2m); hipMallocPitch((void **)&mixture_htod.vy, &pitches.vy, sizeof(double)*i2m, j2n); hipMallocPitch((void **)&mixture_htod.c_sl, &pitches.c_sl, sizeof(double)*i1m, j1m); hipMallocPitch((void **)&mixture_htod.rho_m, &pitches.rho_m, sizeof(double)*i1m, j1m); hipMallocPitch((void **)&mixture_htod.rho_l, &pitches.rho_l, sizeof(double)*i1m, j1m); hipMallocPitch((void **)&mixture_htod.f_g, &pitches.f_g, sizeof(double)*i2m, j2m); 
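    // Note on the pitched allocations in this block: hipMallocPitch pads each
    // row of a 2D field out to an aligned pitch given in bytes, so cell (i, j)
    // of, e.g., f_g lives at byte offset j * pitches.f_g + i * sizeof(double).
    // The `widths` struct filled in at the end of this function converts each
    // byte pitch into an element count (pitch / element size) for the kernels.
    // Illustrative device-side indexing (hypothetical local names):
    //     double *row_j = (double *)((char *)mixture_htod.f_g + j * pitches.f_g);
    //     double  fg_ij = row_j[i];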
hipMallocPitch((void **)&mixture_htod.f_gn, &pitches.f_gn, sizeof(double)*i2m, j2m); hipMallocPitch((void **)&mixture_htod.f_gm, &pitches.f_gm, sizeof(double)*i2m, j2m); hipMallocPitch((void **)&mixture_htod.k_m, &pitches.k_m, sizeof(double)*i2m, j2m); hipMallocPitch((void **)&mixture_htod.C_pm, &pitches.C_pm, sizeof(double)*i1m, j1m); hipMallocPitch((void **)&mixture_htod.Work, &pitches.Work, sizeof(double)*i2m, j2m); hipMallocPitch((void **)&mixture_htod.Ex, &pitches.Ex, sizeof(double)*i1n, j1m); hipMallocPitch((void **)&mixture_htod.Ey, &pitches.Ey, sizeof(double)*i1m, j1n); hipMallocPitch((void **)&mixture_htod.p0, &pitches.p0, sizeof(double)*i1m, j1m); hipMallocPitch((void **)&mixture_htod.p, &pitches.p, sizeof(double2)*i1m, j1m); hipMallocPitch((void **)&mixture_htod.pn, &pitches.pn, sizeof(double2)*i1m, j1m); #ifdef _DEBUG_ printf("T = %i\n", (int)pitches.T); printf("vx = %i\n", (int)pitches.vx); printf("vy = %i\n", (int)pitches.vy); printf("c_sl = %i\n", (int)pitches.c_sl); printf("rho_m = %i\n", (int)pitches.rho_m); printf("rho_l = %i\n", (int)pitches.rho_l); printf("f_g = %i\n", (int)pitches.f_g); printf("f_gn = %i\n", (int)pitches.f_gn); printf("f_gm = %i\n", (int)pitches.f_gm); printf("k_m = %i\n", (int)pitches.k_m); printf("C_pm = %i\n", (int)pitches.C_pm); printf("Work = %i\n", (int)pitches.Work); printf("Ex = %i\n", (int)pitches.Ex); printf("Ey = %i\n", (int)pitches.Ey); printf("p0 = %i\n", (int)pitches.p0); printf("p = %i\n", (int)pitches.p); printf("pn = %i\n", (int)pitches.pn); #endif if (bub_params->enabled) { hipMalloc((void **)&bubbles_htod, sizeof(bubble_t)); hipMalloc((void **)&bubbles_htod.ibm, sizeof(int2)*numBubbles); hipMalloc((void **)&bubbles_htod.ibn, sizeof(int2)*numBubbles); hipMalloc((void **)&bubbles_htod.pos, sizeof(double2)*numBubbles); hipMalloc((void **)&bubbles_htod.R_t, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.R_p, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.R_pn, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.R_n, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.R_nn, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.d1_R_p, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.d1_R_n, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.PG_p, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.PG_n, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.PL_p, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.PL_n, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.PL_m, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.Q_B, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.n_B, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.dt, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.dt_n, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.re, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.re_n, sizeof(double)*numBubbles); hipMalloc((void **)&bubbles_htod.v_B, sizeof(double2)*numBubbles); hipMalloc((void **)&bubbles_htod.v_L, sizeof(double2)*numBubbles); } hipMalloc((void **)&sigma_htod, sizeof(sigma_t)); hipMalloc((void **)&sigma_htod.mx, sizeof(double)*sigma_h.mx_size); hipMalloc((void **)&sigma_htod.my, sizeof(double)*sigma_h.my_size); hipMalloc((void **)&sigma_htod.nx, sizeof(double)*sigma_h.nx_size); hipMalloc((void **)&sigma_htod.ny, sizeof(double)*sigma_h.ny_size); hipMalloc((void **)&grid_htod, sizeof(grid_gen)); hipMalloc((void **)&grid_htod.xu, 
sizeof(double)*grid_h.xu_size); hipMalloc((void **)&grid_htod.rxp, sizeof(double)*grid_h.rxp_size);; checkCUDAError("Memory Allocation"); hipMemcpy2D(mixture_htod.T, pitches.T, mixture_h.T, sizeof(double)*i2m, sizeof(double)*i2m, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.p0, pitches.p0, mixture_h.p0, sizeof(double)*i1m, sizeof(double)*i1m, j1m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.p, pitches.p, mixture_h.p, sizeof(double2)*i1m, sizeof(double2)*i1m, j1m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.pn, pitches.pn, mixture_h.pn, sizeof(double2)*i1m, sizeof(double2)*i1m, j1m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.vx, pitches.vx, mixture_h.vx, sizeof(double)*i2n, sizeof(double)*i2n, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.vy, pitches.vy, mixture_h.vy, sizeof(double)*i2m, sizeof(double)*i2m, j2n, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.c_sl, pitches.c_sl, mixture_h.c_sl, sizeof(double)*i1m, sizeof(double)*i1m, j1m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.rho_m, pitches.rho_m, mixture_h.rho_m, sizeof(double)*i1m, sizeof(double)*i1m, j1m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.rho_l, pitches.rho_l, mixture_h.rho_l, sizeof(double)*i1m, sizeof(double)*i1m, j1m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.f_g, pitches.f_g, mixture_h.f_g, sizeof(double)*i2m, sizeof(double)*i2m, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.f_gn, pitches.f_gn, mixture_h.f_gn, sizeof(double)*i2m, sizeof(double)*i2m, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.f_gm, pitches.f_gm, mixture_h.f_gm, sizeof(double)*i2m, sizeof(double)*i2m, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.k_m, pitches.k_m, mixture_h.k_m, sizeof(double)*i2m, sizeof(double)*i2m, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.C_pm, pitches.C_pm, mixture_h.C_pm, sizeof(double)*i2m, sizeof(double)*i2m, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.Work, pitches.Work, mixture_h.Work, sizeof(double)*i2m, sizeof(double)*i2m, j2m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.Ex, pitches.Ex, mixture_h.Ex, sizeof(double)*i1n, sizeof(double)*i1n, j1m, hipMemcpyHostToDevice); hipMemcpy2D(mixture_htod.Ey, pitches.Ey, mixture_h.Ey, sizeof(double)*i1m, sizeof(double)*i1m, j1n, hipMemcpyHostToDevice); checkCUDAError("Mixture To Device"); if (bub_params->enabled) { hipMemcpy(bubbles_htod.ibm, bubbles_h.ibm, sizeof(int2)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.ibn, bubbles_h.ibn, sizeof(int2)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.pos, bubbles_h.pos, sizeof(double2)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.R_t, bubbles_h.R_t, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.R_p, bubbles_h.R_p, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.R_pn, bubbles_h.R_pn, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.R_n, bubbles_h.R_n, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.R_nn, bubbles_h.R_nn, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.d1_R_p, bubbles_h.d1_R_p, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.d1_R_n, bubbles_h.d1_R_n, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.PG_p, bubbles_h.PG_p, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.PG_n, bubbles_h.PG_n, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.PL_p, bubbles_h.PL_p, 
sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.PL_n, bubbles_h.PL_n, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.PL_m, bubbles_h.PL_m, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.Q_B, bubbles_h.Q_B, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.n_B, bubbles_h.n_B, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.dt, bubbles_h.dt, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.dt_n, bubbles_h.dt_n, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.re, bubbles_h.re, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.re_n, bubbles_h.re_n, sizeof(double)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.v_B, bubbles_h.v_B, sizeof(double2)*numBubbles, hipMemcpyHostToDevice); hipMemcpy(bubbles_htod.v_L, bubbles_h.v_L, sizeof(double2)*numBubbles, hipMemcpyHostToDevice); } // hipMemcpy(&sigma_htod, &sigma_h, // sizeof(sigma_t), hipMemcpyHostToDevice); hipMemcpy(sigma_htod.mx, sigma_h.mx, sizeof(double)*sigma_h.mx_size, hipMemcpyHostToDevice); hipMemcpy(sigma_htod.my, sigma_h.my, sizeof(double)*sigma_h.my_size, hipMemcpyHostToDevice); hipMemcpy(sigma_htod.nx, sigma_h.nx, sizeof(double)*sigma_h.nx_size, hipMemcpyHostToDevice); hipMemcpy(sigma_htod.ny, sigma_h.ny, sizeof(double)*sigma_h.ny_size, hipMemcpyHostToDevice); checkCUDAError("Sigma To Device"); // hipMemcpy(&grid_htod, &grid_h, // sizeof(grid_gen), hipMemcpyHostToDevice); hipMemcpy(grid_htod.xu, grid_h.xu, sizeof(double)*grid_h.xu_size, hipMemcpyHostToDevice); hipMemcpy(grid_htod.rxp, grid_h.rxp, sizeof(double)*grid_h.rxp_size, hipMemcpyHostToDevice); checkCUDAError("Grid To Device"); // Throw constants into cache double3 tmp; tmp.x = 1.0 / ((double)sim_params->deltaBand); tmp.y = 2.0 * acos(-1.0) / ((double)sim_params->deltaBand) * grid_size->rdx; tmp.z = 2.0 * acos(-1.0) / ((double)sim_params->deltaBand) * grid_size->rdy; hipMemcpyToSymbol(mixture_c, &mixture_htod, sizeof(mixture_t)); hipMemcpyToSymbol(sigma_c, &sigma_htod, sizeof(sigma_t)); hipMemcpyToSymbol(gridgen_c, &grid_htod, sizeof(grid_gen)); hipMemcpyToSymbol(Pi, &Pi_h, sizeof(double)); hipMemcpyToSymbol(Pi4r3, &Pi4r3_h, sizeof(double)); hipMemcpyToSymbol(delta_coef, &tmp, sizeof(double3)); hipMemcpyToSymbol(array_c, array_index, sizeof(array_index_t)); hipMemcpyToSymbol(grid_c, grid_size, sizeof(grid_t)); hipMemcpyToSymbol(sim_params_c, sim_params, sizeof(sim_params_t)); hipMemcpyToSymbol(transducer_c, transducer, sizeof(transducer_t)); hipMemcpyToSymbol(mix_params_c, mix_params, sizeof(mix_params_t)); hipMemcpyToSymbol(PML_c, PML, sizeof(PML_t)); if (bub_params->enabled) { hipMemcpyToSymbol(bubbles_c, &bubbles_htod, sizeof(bubble_t)); hipMemcpyToSymbol(bub_params_c, bub_params, sizeof(bub_params_t)); hipMemcpyToSymbol(num_bubbles, &numBubbles, sizeof(int)); } checkCUDAError("Constant Memory Cache"); // Determine the required CUDA parameters widths.T = pitches.T / sizeof(double); widths.P = pitches.P / sizeof(double); widths.p0 = pitches.p0 / sizeof(double); widths.p = pitches.p / sizeof(double2); widths.pn = pitches.pn / sizeof(double2); widths.vx = pitches.vx / sizeof(double); widths.vy = pitches.vy / sizeof(double); widths.c_sl = pitches.c_sl / sizeof(double); widths.rho_m = pitches.rho_m / sizeof(double); widths.rho_l = pitches.rho_l / sizeof(double); widths.f_g = pitches.f_g / sizeof(double); widths.f_gn = pitches.f_gn / sizeof(double); widths.f_gm = 
pitches.f_gm / sizeof(double);
    widths.k_m = pitches.k_m / sizeof(double);
    widths.C_pm = pitches.C_pm / sizeof(double);
    widths.Work = pitches.Work / sizeof(double);
    widths.Ex = pitches.Ex / sizeof(double);
    widths.Ey = pitches.Ey / sizeof(double);

    return 0;
}

// Free all CUDA variables
int destroy_CUDA_variables(bub_params_t *bub_params)
{
    hipFree(mixture_htod.T);
    hipFree(mixture_htod.vx);
    hipFree(mixture_htod.vy);
    hipFree(mixture_htod.c_sl);
    hipFree(mixture_htod.rho_m);
    hipFree(mixture_htod.rho_l);
    hipFree(mixture_htod.f_g);
    hipFree(mixture_htod.f_gn);
    hipFree(mixture_htod.f_gm);
    hipFree(mixture_htod.k_m);
    hipFree(mixture_htod.C_pm);
    hipFree(mixture_htod.Work);
    hipFree(mixture_htod.Ex);
    hipFree(mixture_htod.Ey);
    hipFree(mixture_htod.p0);
    hipFree(mixture_htod.p);
    hipFree(mixture_htod.pn);

    hipHostFree(mixture_h.T);
    hipHostFree(mixture_h.vx);
    hipHostFree(mixture_h.vy);
    hipHostFree(mixture_h.c_sl);
    hipHostFree(mixture_h.rho_m);
    hipHostFree(mixture_h.rho_l);
    hipHostFree(mixture_h.f_g);
    hipHostFree(mixture_h.f_gn);
    hipHostFree(mixture_h.f_gm);
    hipHostFree(mixture_h.k_m);
    hipHostFree(mixture_h.C_pm);
    hipHostFree(mixture_h.Work);
    hipHostFree(mixture_h.Ex);
    hipHostFree(mixture_h.Ey);
    hipHostFree(mixture_h.p0);
    hipHostFree(mixture_h.p);
    hipHostFree(mixture_h.pn);

    if (bub_params->enabled) {
        hipFree(bubbles_htod.ibm);
        hipFree(bubbles_htod.ibn);
        hipFree(bubbles_htod.pos);
        hipFree(bubbles_htod.R_t);
        hipFree(bubbles_htod.R_p);
        hipFree(bubbles_htod.R_pn);
        hipFree(bubbles_htod.R_n);
        hipFree(bubbles_htod.R_nn);
        hipFree(bubbles_htod.d1_R_p);
        hipFree(bubbles_htod.d1_R_n);
        hipFree(bubbles_htod.PG_p);
        hipFree(bubbles_htod.PG_n);
        hipFree(bubbles_htod.PL_p);
        hipFree(bubbles_htod.PL_n);
        hipFree(bubbles_htod.PL_m);
        hipFree(bubbles_htod.Q_B);
        hipFree(bubbles_htod.n_B);
        hipFree(bubbles_htod.dt);
        hipFree(bubbles_htod.dt_n);
        hipFree(bubbles_htod.re);
        hipFree(bubbles_htod.re_n);
        hipFree(bubbles_htod.v_B);
        hipFree(bubbles_htod.v_L);

        hipHostFree(bubbles_h.ibm);
        hipHostFree(bubbles_h.ibn);
        hipHostFree(bubbles_h.pos);
        hipHostFree(bubbles_h.R_t);
        hipHostFree(bubbles_h.R_p);
        hipHostFree(bubbles_h.R_pn);
        hipHostFree(bubbles_h.R_n);
        hipHostFree(bubbles_h.R_nn);
        hipHostFree(bubbles_h.d1_R_p);
        hipHostFree(bubbles_h.d1_R_n);
        hipHostFree(bubbles_h.PG_p);
        hipHostFree(bubbles_h.PG_n);
        hipHostFree(bubbles_h.PL_p);
        hipHostFree(bubbles_h.PL_n);
        hipHostFree(bubbles_h.PL_m);
        hipHostFree(bubbles_h.Q_B);
        hipHostFree(bubbles_h.n_B);
        hipHostFree(bubbles_h.dt);
        hipHostFree(bubbles_h.dt_n);
        hipHostFree(bubbles_h.re);
        hipHostFree(bubbles_h.re_n);
        hipHostFree(bubbles_h.v_B);
        hipHostFree(bubbles_h.v_L);
    }

    hipFree(sigma_htod.mx);
    hipFree(sigma_htod.my);
    hipFree(sigma_htod.nx);
    hipFree(sigma_htod.ny);

    hipFree(grid_htod.xu);
    hipFree(grid_htod.rxp);

    checkCUDAError("Memory Allocation");

    return 0;
}

// Checks for any CUDA runtime errors
void checkCUDAError( const char *msg)
{
#ifdef _DEBUG_
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return;
#else
    return;
#endif
}
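// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical names, never called by the solver): the
// main loop in solve_bubbles() advances the simulation time with a
// compensated summation (the s1 / s2 / tstepx bookkeeping) so that millions
// of additions of a tiny dt do not accumulate rounding error.  The helper
// below restates that update in isolation.
// ---------------------------------------------------------------------------
struct time_accumulator_sketch {
    double t;      // accumulated simulation time (tstep in the solver)
    double carry;  // rounding remainder carried to the next step (tstepx)
};

static inline void advance_time_sketch(time_accumulator_sketch *acc, double dt)
{
    double s1  = acc->t;                    // total before this step
    acc->t     = acc->t + dt + acc->carry;  // add dt plus the stored remainder
    double s2  = acc->t - s1;               // what was actually added
    acc->carry = dt + acc->carry - s2;      // keep the part lost to rounding
}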
df430b30a673accec38b47b8cd63401015a1eb8b.cu
// CUDA solver for Rayleigh Plesset #include "bubbles.h" #include "bubble_CUDA.h" #include "bubble_CUDA_kernel.cuh" using namespace thrust; host_vector<double2> focalpoint; host_vector<double2> control; // Mathematical constants const double Pi_h = acos(-1.0); const double Pi4r3_h = Pi_h * 4.0/3.0; mixture_t mixture_h, mixture_htod; bubble_t bubbles_h, bubbles_htod; grid_gen grid_h, grid_htod; sigma_t sigma_h, sigma_htod; // Allocation variables pitch_sizes_t pitches; array_widths_t widths; // Array dimensions int j0m, j0n, i1m, j1m, i1n, j1n, i2m, j2m, i2n, j2n, m1Vol, m2Vol, v_xVol, v_yVol, E_xVol, E_yVol; int numBubbles = 0; // Diagnostic display message #ifdef _DEBUG_ void display(double data[], int xdim, int ydim, int num_lines, char *msg) { printf("%s\n", msg); for (double j = 0; j <= ydim - 1; j+= (ydim-1)/num_lines) { if (!j) { for (double i = 0; i <= xdim - 1; i += (xdim - 1)/num_lines) { printf("\t(%i)\t",(int)i); } printf("\n"); } printf("(%i)\t",(int)j); for (double i = 0; i <= i2m - 1; i += (i2m - 1)/num_lines) { printf("%4.2E\t", data[xdim * (int)j + (int)i]); } printf("\n"); } } #endif thrust::tuple<bool,double2,double2,double2> solve_bubbles(array_index_t *array_index, grid_t *grid_size, PML_t *PML, sim_params_t *sim_params, bub_params_t *bub_params, transducer_t *transducer, debug_t *debug, int save_function, thrust::tuple<bool, double2,double2,double2> pid_init) { // Variables needed for control structures int nstep = 0; double tstep = 0.0, tstepx = 0.0; int loop; double resimax; double s1, s2; int max_iter; // Data thread setup int pthread_count = 0; pthread_t save_thread[sim_params->NSTEPMAX/sim_params->DATA_SAVE]; pthread_attr_t pthread_custom_attr; output_plan_t *plan; pthread_attr_init(&pthread_custom_attr); plan = (output_plan_t *)malloc(sizeof(output_plan_t)); #ifdef _DEBUG_ // Clear terminal for output if (system("clear")) { exit(EXIT_FAILURE); } #endif // Set CUDA configuration setCUDAflags(); // Initialize CUDA streams int num_streams = 3; cudaStream_t stream[num_streams]; for (int i = 0; i < num_streams; i++) { cudaStreamCreate(&stream[i]); } // Initialize CUDA events cudaEvent_t stop[num_streams]; for (int i = 0; i < num_streams; i++) { cudaEventCreateWithFlags(&stop[i], cudaEventBlockingSync); } // Mixture Parameters mix_params_t *mix_params = (mix_params_t*) calloc(1, sizeof(mix_params_t)); // Initialize Variables printf("Computing Simulation Variables..."); initialize_variables(grid_size, PML, sim_params, transducer, array_index, mix_params, bub_params); printf("\tdone\n"); // Initialize control system printf("Initializing PID controller..."); focalpoint.clear(); control.clear(); focalpoint.push_back(make_double2(0.0,0.0)); double2 pid_cumulative_err = make_double2(0.0,0.0); double2 pid_derivative_err = make_double2(0.0,0.0); if (transducer->pid) { if (thrust::get<0>(pid_init)) { control.push_back(get<1>(pid_init)); pid_cumulative_err = get<2>(pid_init); pid_derivative_err = get<3>(pid_init); } else if (transducer->init_control) { control.push_back(make_double2(0.0,transducer->init_control)); } else { control.push_back(transducer->fp); } } else { control.push_back(transducer->fp); } double control_dist = control[control.size()-1].y / 0.5 / sqrt(3.0); printf("\tdone\n"); // Initialize folders to save to printf("Preparing folders..."); initialize_folders(); printf("\t\t\tdone\n"); // Allocate memory on the device printf("Allocating Memory..."); initialize_CUDA_variables(grid_size, PML, sim_params, transducer, array_index, mix_params, bub_params); 
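    // (Descriptive note) initialize_CUDA_variables() mirrors every field of
    // the host-side mixture_h / bubbles_h arrays with a device copy
    // (mixture_htod, bubbles_htod), records the byte pitch of each 2D field
    // in `pitches`, and copies the small parameter structs (grid, PML,
    // transducer, mixture and bubble parameters) into device symbol memory
    // (the *_c constants used by the kernels).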
printf("\t\t\tdone\n"); // Initialization kernels printf("Running Initialization Kernels..."); // Update Bubble Index if (bub_params->enabled) { update_bubble_indices(stream, stop); } // Calculate the initial state void fraction if (bub_params->enabled) { calculate_void_fraction(mixture_htod, transducer, pitches, widths, stream, stop); } // Set f_gn and f_gm to f_g synchronize_void_fraction(mixture_htod, pitches, stream, stop); printf("\tdone\n"); // Link pointers in plan to simulation plan->mixture_h = mixture_h; plan->bubbles_h = bubbles_h; plan->array_index = array_index; plan->grid_size = grid_size; plan->sim_params = sim_params; plan->transducer = transducer; plan->debug = debug; /************************ * Main simulation loop * ************************/ printf("Running the simulation\t\t"); while (((sim_params->NSTEPMAX != 0) && (nstep < sim_params->NSTEPMAX)) || ((sim_params->TSTEPMAX != 0) && (tstep < sim_params->TSTEPMAX))) { nstep++; // Accurate time addition s1 = tstep; tstep = tstep + mix_params->dt + tstepx; s2 = tstep - s1; tstepx = mix_params->dt + tstepx - s2; cudaMemcpyToSymbol(tstep_c, &tstep, sizeof(double)); cudaMemcpyToSymbol(focal_point_c, &control[control.size()-1], sizeof(double2)); cudaMemcpyToSymbol(focal_dist_c, &control_dist, sizeof(double)); checkCUDAError("Set timestamp"); // Store Bubble and Mixture, and predict void fraction store_variables(mixture_htod, bubbles_htod, pitches, widths, stream, stop); // Update mixture velocity calculate_velocity_field(widths, stream, stop); // Move the bubbles if (bub_params->enabled) { bubble_motion(bubbles_htod, widths, stream, stop); } // Calculate pressure resimax = calculate_pressure_field(mixture_h, mixture_htod, mix_params->P_inf, pitches, widths, stream, stop); // Subloop for solving Rayleigh Plesset equations if (bub_params->enabled) { loop = 0; while (resimax > 1.0e-7f) { loop++; // Find bubble pressure interpolate_bubble_pressure(widths, stream, stop); // Solve Rayleigh-Plesset Equations max_iter = max(solve_bubble_radii(bubbles_htod, stream, stop), max_iter); // Calculate Void Fraction calculate_void_fraction(mixture_htod, transducer, pitches, widths, stream, stop); // Calculate Pressure resimax = calculate_pressure_field(mixture_h, mixture_htod, mix_params->P_inf, pitches, widths, stream, stop); #ifdef _DEBUG_ printf("\033[A\033[2K"); printf("Simulation step %5i subloop %5i \t resimax = %4.2E, inner loop executed %i times.\n", nstep, loop, resimax, max_iter); #endif } } // Calculate mixture temperature calculate_temperature(mixture_htod, bub_params, pitches, widths, stream, stop); // Calculate mixture properties calculate_properties(widths, stream, stop); #ifdef _DEBUG_ if (system("clear")) { exit(EXIT_FAILURE); } #else printf("\r"); #endif // Display progress if (sim_params->NSTEPMAX != 0) { printf("Running the simulation...\t\tnstep : %5i / %i\t", nstep, sim_params->NSTEPMAX); } else if (sim_params->TSTEPMAX !=0) { printf("Running the simulation...\t\ttstep : %4.2E / %4.2E\t", tstep, sim_params->TSTEPMAX); } focalpoint.push_back( make_double2( 0.0, filter_loop( determine_focal_point(mixture_htod.Work, i2m, widths.Work, j2m, grid_size->dx, grid_size->dy).y ) ) ); printf("focal point is (%8.6E, %8.6E)\t", focalpoint[focalpoint.size()-1].x, focalpoint[focalpoint.size()-1].y); if (transducer->pid) { if (nstep > transducer->pid_start_step) { control.push_back( focal_PID(transducer->fp, focalpoint[focalpoint.size()-1], focalpoint[focalpoint.size()-2], &pid_derivative_err, &pid_cumulative_err, mix_params->dt) ); 
control[control.size()-1] = make_double2( 0.0, clamp<double>( control[control.size()-1].y, transducer->fp.y, grid_size->LY ) ); control_dist = control[control.size()-1].y / 0.5 / sqrt(3.0); } printf("control focal point is (%+4.2E, %+4.2E)\t", control[control.size()-1].x, control[control.size()-1].y); } #ifdef _DEBUG_ printf("\n"); #else fflush(stdout); #endif // Save data at intervals if ((((int)nstep) % ((int)sim_params->DATA_SAVE) == 0)) { // Copy over requested variables if (debug->p0) cudaMemcpy2D(mixture_h.p0, sizeof(double)*i1m, mixture_htod.p0, pitches.p0, sizeof(double)*i1m, j1m, cudaMemcpyDeviceToHost); if (debug->fg) cudaMemcpy2D(mixture_h.f_g, sizeof(double)*i2m, mixture_htod.f_g, pitches.f_g, sizeof(double)*i2m, j2m, cudaMemcpyDeviceToHost); if (debug->T) cudaMemcpy2D(mixture_h.T, sizeof(double)*i2m, mixture_htod.T, pitches.T, sizeof(double)*i2m, j2m, cudaMemcpyDeviceToHost); if (debug->vxy) cudaMemcpy2D(mixture_h.vx, sizeof(double)*i2n, mixture_htod.vx, pitches.vx, sizeof(double)*i2n, j2m, cudaMemcpyDeviceToHost); if (debug->vxy) cudaMemcpy2D(mixture_h.vy, sizeof(double)*i2m, mixture_htod.vy, pitches.vy, sizeof(double)*i2m, j2n, cudaMemcpyDeviceToHost); if (debug->bubbles) cudaMemcpy(bubbles_h.pos, bubbles_htod.pos, sizeof(double2)*numBubbles, cudaMemcpyDeviceToHost); if (debug->bubbles) cudaMemcpy(bubbles_h.R_t, bubbles_htod.R_t, sizeof(double)*numBubbles, cudaMemcpyDeviceToHost); if (debug->bubbles) cudaMemcpy(bubbles_h.PG_p, bubbles_htod.PG_p, sizeof(double)*numBubbles, cudaMemcpyDeviceToHost); // Assign the data thread with saving the requested variables #ifdef _OUTPUT_ plan->step = nstep; plan->tstep = (float)tstep; if (save_function & sph) { pthread_create(&save_thread[pthread_count++], &pthread_custom_attr, save_sph, (void *)(plan)); } else if (save_function & ascii) { pthread_create(&save_thread[pthread_count++], &pthread_custom_attr, save_ascii, (void *)(plan)); } #endif #ifdef _DEBUG_ // Display the mixture field variables in square grids in the interactive terminal double num_lines = debug->display - 1; printf("resimax = %4.2E\n\n",resimax); if (debug->fg) { display(mixture_h.fg, i2m, j2m, num_lines, "fg Grid"); } if (debug->p0) { display(mixture_h.p0, i1m, j1m, num_lines, "p0 Grid"); } if (debug->T) { display(mixture_h.T, i2m, j2m, num_lines, "T Grid"); } if (debug->vxy) { display(mixture_h.vx, i2n, j2m, num_lines, "vx Grid"); display(mixture_h.vy, i2m, j2n, num_lines, "vy Grid"); } printf("\n\n"); #endif } } #ifdef _OUTPUT_ for (int i = 0; i < pthread_count; i++) { pthread_join(save_thread[i], NULL); } #endif #ifndef _DEBUG_ printf("\r"); #endif printf("Running the simulation...\t\tdone\n\n "); printf("Cleaning up..."); // Destroy the variables to prevent further errors if (destroy_CUDA_variables(bub_params)) { exit(EXIT_FAILURE); } for (int i = 0; i < num_streams; i++) { cudaStreamDestroy(stream[i]); } for (int i = 0; i < num_streams; i++) { cudaEventDestroy(stop[i]); } printf("\tdone\n\n"); return thrust::make_tuple( 1, control[control.size()-1], pid_cumulative_err, pid_derivative_err ); } // solve_bubbles() // Initialize simulation variables int initialize_variables(grid_t *grid_size, PML_t *PML, sim_params_t *sim_params, transducer_t *transducer, array_index_t *array_index, mix_params_t *mix_params, bub_params_t *bub_params) { *grid_size = init_grid_size(*grid_size); // Plane Wave *transducer = init_transducer(*transducer, *grid_size); // Array index *array_index = init_array(*grid_size, *sim_params); // Sigma for PML sigma_h = init_sigma(*PML, *sim_params, 
*grid_size, *array_index); // rxp and xu grid_h = init_grid_vector (*array_index, *grid_size); // Mixture *mix_params = init_mix(); mix_params->dt = mix_set_time_increment(*sim_params, min(grid_size->dx, grid_size->dy), mix_params->cs_inf); mixture_h = init_mix_array(mix_params, *array_index); // Bubbles if (bub_params->enabled) { *bub_params = init_bub_params(*bub_params, *sim_params, mix_params->dt); bubbles_h = init_bub_array(bub_params, mix_params, array_index, grid_size, transducer); } return 0; } // Initialize grid parameters grid_t init_grid_size(grid_t grid_size) { grid_size.dx = (double)grid_size.LX / (double)grid_size.X; grid_size.dy = (double)grid_size.LY / (double)grid_size.Y; grid_size.rdx = (double) 1.0 / (double)grid_size.dx; grid_size.rdy = (double) 1.0 / (double)grid_size.dy; #ifdef _DEBUG_ printf("Grid Size Parameters\n"); printf("dx = %E\tdy = %E\trdx = %E\trdy = %E\n\n", grid_size.dx, grid_size.dy, grid_size.rdx, grid_size.rdy); #endif return grid_size; } // Initialize plane wave coefficients transducer_t init_transducer(transducer_t transducer, grid_t grid_size) { if (transducer.f_dist) { transducer.fp.x = 0.0; transducer.fp.y = transducer.f_dist * 0.5 * sqrt(3.0); } else { transducer.fp.x = 0.0; transducer.fp.y = grid_size.LY * 0.5; } transducer.omega = 2.0 * acos(-1.0) * transducer.freq; return transducer; } // Initializes the array index array_index_t init_array(const grid_t grid_size, const sim_params_t sim_params) { array_index_t a; // a.lmax = (sim_params.deltaBand+1)*(sim_params.deltaBand+1); a.ms = -sim_params.order/2 + 1; a.me = sim_params.order + a.ms - 1; a.ns = -sim_params.order/2; a.ne = sim_params.order + a.ns - 1; a.istam = 1; a.iendm = grid_size.X; a.istan = a.istam - 1; a.iendn = a.iendm; a.ista1m = a.istan + a.ms; a.iend1m = a.iendn + a.me; a.ista1n = a.istam + a.ns; a.iend1n = a.iendm + a.ne; a.ista2m = a.ista1n + a.ms; a.iend2m = a.iend1n + a.me; a.ista2n = a.ista1m + a.ns; a.iend2n = a.iend1m + a.ne; a.jstam = 1; a.jendm = grid_size.Y; a.jstan = a.jstam - 1; a.jendn = a.jendm; a.jsta1m = a.jstan + a.ms; a.jend1m = a.jendn + a.me; a.jsta1n = a.jstam + a.ns; a.jend1n = a.jendm + a.ne; a.jsta2m = a.jsta1n + a.ms; a.jend2m = a.jend1n + a.me; a.jsta2n = a.jsta1m + a.ns; a.jend2n = a.jend1m + a.ne; #ifdef _DEBUG_ printf("Array Index\n"); // printf("lmax : %i\n", a.lmax); printf("ms : %i\t", a.ms); printf("me : %i\n", a.me); printf("ns : %i\t", a.ns); printf("ne : %i\n\n", a.ne); printf("istam : %i\t", a.istam); printf("iendm : %i\t\t", a.iendm); printf("istan : %i\t", a.istan); printf("iendn : %i\n", a.iendn); printf("jstam : %i\t", a.jstam); printf("jendm : %i\t\t", a.jendm); printf("jstan : %i\t", a.jstan); printf("jendn : %i\n", a.jendn); printf("ista1m : %i\t", a.ista1m); printf("iend1m : %i\t\t", a.iend1m); printf("ista1n : %i\t", a.ista1n); printf("iend1n : %i\n", a.iend1n); printf("jsta1m : %i\t", a.jsta1m); printf("jend1m : %i\t\t", a.jend1m); printf("jsta1n : %i\t", a.jsta1n); printf("jend1n : %i\n", a.jend1n); printf("ista2m : %i\t", a.ista2m); printf("iend2m : %i\t\t", a.iend2m); printf("ista2n : %i\t", a.ista2n); printf("iend2n : %i\n", a.iend2n); printf("jsta2m : %i\t", a.jsta2m); printf("jend2m : %i\t\t", a.jend2m); printf("jsta2n : %i\t", a.jsta2n); printf("jend2n : %i\n\n", a.jend2n); #endif //_DEBUG_ return a; } // init_array() // Initializes mixture parameters mix_params_t init_mix() { mix_params_t mix_params; mix_params.T_inf = 293.15; mix_params.P_inf = 0.1e6; mix_params.fg_inf = 1.0e-7; mix_params.rho_inf = 
density_water(mix_params.P_inf,mix_params.T_inf); mix_params.cs_inf = adiabatic_sound_speed_water(mix_params.P_inf,mix_params.T_inf); return mix_params; } // init_mix() // Set the mixture time step double mix_set_time_increment(sim_params_t sim_params, double dx_min, double u_max) { #ifdef _DEBUG_ printf("sim_params.cfl = %E\tdx_min = %E\tu_max = %E\n",sim_params.cfl, dx_min, u_max); printf("dt = %E\n\n",sim_params.cfl * dx_min / u_max); #endif //_DEBUG_ return sim_params.cfl * dx_min / u_max; } // mix_set_time_increment() // Initializes implicit bubble parameters bub_params_t init_bub_params(bub_params_t bub_params, sim_params_t sim_params, double dt0) { bub_params.R03 = bub_params.R0 * bub_params.R0 * bub_params.R0; bub_params.PG0 = bub_params.PL0 + 2.0 * bub_params.sig/bub_params.R0; bub_params.coeff_alpha = bub_params.gam * bub_params.PG0 * bub_params.R03 / (2.0 * (bub_params.gam - 1.0) * bub_params.T0 * bub_params.K0); bub_params.dt0 = 0.1 * dt0; bub_params.npi = 0; bub_params.mbs = -sim_params.deltaBand / 2 + 1; bub_params.mbe = sim_params.deltaBand + bub_params.mbs - 1; bub_params.nbs = -sim_params.deltaBand / 2; bub_params.nbe = sim_params.deltaBand + bub_params.nbs - 1; #ifdef _DEBUG_ printf("Bubble Parameters\n"); printf("PG0 = %E\tdt0 = %E\nmbs = %i\tmbe = %i\tnbs = %i\tnbe = %i\n\n", bub_params.PG0, bub_params.dt0, bub_params.mbs, bub_params.mbe, bub_params.nbs, bub_params.nbe); #endif // _DEBUG_ return bub_params; } // init_bub_params() // Initializes useful index variables grid_gen init_grid_vector (array_index_t array_index, grid_t grid_size) { grid_gen grid; grid.rxp = (double*) calloc((array_index.iend2m - array_index.ista2m + 1), sizeof(double)); grid.xu = (double*) calloc((array_index.iend2n - array_index.ista2n + 1), sizeof(double)); grid.rxp_size = (array_index.iend2m - array_index.ista2m + 1); grid.xu_size = (array_index.iend2n - array_index.ista2n + 1); for (int i = array_index.ista2m; i <= array_index.iend2m; i++) { grid.rxp[i - array_index.ista2m] = 1.0/(grid_size.dx * ((double)i - 0.5)); } for (int i = array_index.ista2n; i <= array_index.iend2n; i++) { grid.xu[i - array_index.ista2n] = ((double)i) * grid_size.dx; } return grid; } // init_grid_vector() // Initialize the host mixture array mixture_t init_mix_array(mix_params_t * mix_params, array_index_t array_index) { mixture_t mix; #ifdef _DEBUG_ printf("Mixing mixture...\n"); #endif int m1Vol = (array_index.iend1m - array_index.ista1m + 1) * (array_index.jend1m - array_index.jsta1m + 1); int m2Vol = (array_index.iend2m - array_index.ista2m + 1) * (array_index.jend2m - array_index.jsta2m + 1); int v_xVol = (array_index.iend2n - array_index.ista2n + 1) * (array_index.jend2m - array_index.jsta2m + 1); int v_yVol = (array_index.iend2m - array_index.ista2m + 1) * (array_index.jend2n - array_index.jsta2n + 1); int E_xVol = (array_index.iend1n - array_index.ista1n + 1) * (array_index.jend1m - array_index.jsta1m + 1); int E_yVol = (array_index.iend1m - array_index.ista1m + 1) * (array_index.jend1n - array_index.jsta1n + 1); cudaMallocHost((void**)&mix.T, m2Vol*sizeof(double)); cudaMallocHost((void**)&mix.p0, m1Vol*sizeof(double)); cudaMallocHost((void**)&mix.p, m1Vol*sizeof(double2)); cudaMallocHost((void**)&mix.pn, m1Vol*sizeof(double2)); cudaMallocHost((void**)&mix.c_sl, m1Vol*sizeof(double)); cudaMallocHost((void**)&mix.rho_m, m1Vol*sizeof(double)); cudaMallocHost((void**)&mix.rho_l, m1Vol*sizeof(double)); cudaMallocHost((void**)&mix.f_g, m2Vol*sizeof(double)); cudaMallocHost((void**)&mix.f_gn, m2Vol*sizeof(double)); 
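    // (Descriptive note) cudaMallocHost allocates page-locked (pinned) host
    // memory for these mixture fields; pinned buffers transfer to and from
    // the GPU by DMA and are a prerequisite for truly asynchronous copies,
    // which is why the host mirrors are not plain malloc'd.  They are
    // released again in destroy_CUDA_variables().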
cudaMallocHost((void**)&mix.f_gm, m2Vol*sizeof(double)); cudaMallocHost((void**)&mix.k_m, m2Vol*sizeof(double)); cudaMallocHost((void**)&mix.C_pm, m1Vol*sizeof(double)); cudaMallocHost((void**)&mix.Work, m2Vol*sizeof(double)); cudaMallocHost((void**)&mix.vx, v_xVol*sizeof(double)); cudaMallocHost((void**)&mix.vy, v_yVol*sizeof(double)); cudaMallocHost((void**)&mix.Ex, E_xVol*sizeof(double)); cudaMallocHost((void**)&mix.Ey, E_yVol*sizeof(double)); for (int i = 0; i < m1Vol; i++) { mix.p0[i] = 0.0; mix.p[i] = make_double2(0.0, 0.0); mix.pn[i] = make_double2(0.0, 0.0); mix.rho_m[i] = mix.rho_l[i] = density_water(mix_params->P_inf, mix_params->T_inf); mix.c_sl[i] = adiabatic_sound_speed_water(mix_params->P_inf, mix_params->T_inf); mix.C_pm[i] = specific_heat_water(mix_params->T_inf); mix.k_m[i] = thermal_conductivity_water(mix_params->T_inf); } for (int i = 0; i < v_xVol; i++) { mix.vx[i] = 0.0; } for (int i = 0; i < v_yVol; i++) { mix.vy[i] = 0.0; } for (int i = 0; i < E_xVol; i++) { mix.Ex[i] = 0.0; } for (int i = 0; i < E_yVol; i++) { mix.Ey[i] = 0.0; } for (int i = 0; i < m2Vol; i++) { mix.T[i] = 0.0; mix.f_g[i] = 0.0;//(double) i/m2Vol; mix.Work[i] = 0; } #ifdef _DEBUG_ printf("Mixture grid generated.\n\n"); #endif return mix; } // init_mix_array() // Initialize the host bubble array bubble_t init_bub_array(bub_params_t *bub_params, mix_params_t *mix_params, array_index_t *array_index, grid_t *grid_size, transducer_t *transducer) { double2 pos = make_double2(0.0, 0.0); host_vector<bubble_t_aos> bub; bubble_t_aos init_bubble; bubble_t ret_bub; #ifdef _DEBUG_ printf("Baking bubbles...\n"); #endif for (int i = array_index->istam; i <= array_index->iendm; i++) { pos.x = ( (double)i - 0.5) * grid_size->dx; for (int j = array_index->jstam; j <= array_index->jendm; j++) { pos.y = ( (double)j - 0.5) * grid_size->dy; if (transducer->box_size && (abs(pos.x - transducer->fp.x) < 0.5 * transducer->box_size) && (abs(pos.y - transducer->fp.y) < 0.5 * transducer->box_size)) { init_bubble = bubble_input(pos, bub_params->fg0, *bub_params, *grid_size, *transducer); bub.push_back(init_bubble); } else if (!(transducer->box_size)) { init_bubble = bubble_input(pos, bub_params->fg0, *bub_params, *grid_size, *transducer); bub.push_back(init_bubble); } } } numBubbles = bub_params->npi = bub.size(); cudaMallocHost((void**)&ret_bub.ibm, bub.size()*sizeof(int2)); cudaMallocHost((void**)&ret_bub.ibn, bub.size()*sizeof(int2)); cudaMallocHost((void**)&ret_bub.pos, bub.size()*sizeof(double2)); cudaMallocHost((void**)&ret_bub.R_t, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.R_p, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.R_pn, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.R_n, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.R_nn, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.d1_R_p, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.d1_R_n, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.PG_p, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.PG_n, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.PL_p, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.PL_n, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.PL_m, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.Q_B, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.n_B, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.dt, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.dt_n, 
bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.re, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.re_n, bub.size()*sizeof(double)); cudaMallocHost((void**)&ret_bub.v_B, bub.size()*sizeof(double2)); cudaMallocHost((void**)&ret_bub.v_L, bub.size()*sizeof(double2)); for (int i = 0; i < bub_params->npi; i++) { ret_bub.ibm[i] = bub[i].ibm; ret_bub.ibn[i] = bub[i].ibn; ret_bub.pos[i] = bub[i].pos; ret_bub.R_t[i] = bub[i].R_t; ret_bub.R_p[i] = bub[i].R_p; ret_bub.R_pn[i] = bub[i].R_pn; ret_bub.R_n[i] = bub[i].R_n; ret_bub.R_nn[i] = bub[i].R_nn; ret_bub.d1_R_p[i] = bub[i].d1_R_p; ret_bub.d1_R_n[i] = bub[i].d1_R_n; ret_bub.PG_p[i] = bub[i].PG_p; ret_bub.PG_n[i] = bub[i].PG_n; ret_bub.PL_p[i] = bub[i].PL_p; ret_bub.PL_n[i] = bub[i].PL_n; ret_bub.PL_m[i] = bub[i].PL_m; ret_bub.Q_B[i] = bub[i].Q_B; ret_bub.n_B[i] = bub[i].n_B; ret_bub.dt[i] = bub[i].dt; ret_bub.dt_n[i] = bub[i].dt_n; ret_bub.re[i] = bub[i].re; ret_bub.re_n[i] = bub[i].re_n; ret_bub.v_B[i] = bub[i].v_B; ret_bub.v_L[i] = bub[i].v_L; } #ifdef _DEBUG_ printf("%i bubbles initialized.\n\n", bub_params->npi); #endif return ret_bub; } // Create a new bubble object based on initial conditions bubble_t_aos bubble_input(double2 pos, double fg_in, bub_params_t bub_params, grid_t grid_size, transducer_t transducer) { bubble_t_aos new_bubble; double Pi = acos(-1.0); new_bubble.pos = pos; new_bubble.R_t = bub_params.R0; new_bubble.R_p = new_bubble.R_pn = bub_params.R0; new_bubble.R_n = new_bubble.R_nn = bub_params.R0; new_bubble.d1_R_p = new_bubble.d1_R_n = 0.0; new_bubble.PG_p = new_bubble.PG_n = bub_params.PG0; new_bubble.PL_p = new_bubble.PL_n = new_bubble.PL_m = 0.0; if (transducer.cylindrical) { new_bubble.n_B = fg_in * (pos.x * grid_size.dx * grid_size.dy) / (4.0 / 3.0 * Pi * pow(new_bubble.R_t,3)); } else { new_bubble.n_B = fg_in * (grid_size.dx * grid_size.dy) / (4.0 / 3.0 * Pi * pow(new_bubble.R_t,3)); } new_bubble.Q_B = 0.0; new_bubble.dt = new_bubble.dt_n = bub_params.dt0; new_bubble.re = new_bubble.re_n = 0.0; new_bubble.v_B = new_bubble.v_L = make_double2(0.0, 0.0); new_bubble.ibm = make_int2(0,0); new_bubble.ibn = make_int2(0,0); return new_bubble; } // Initializes the sigma field used for PML sigma_t init_sigma (const PML_t PML, const sim_params_t sim_params, const grid_t grid_size, const array_index_t array_index) { #ifdef _DEBUG_ printf("Generating a perfectly matched layer.\n"); #endif sigma_t sigma; sigma.mx = (double*) calloc((array_index.iend1m - array_index.ista1m + 1), sizeof(double)); sigma.my = (double*) calloc((array_index.jend1m - array_index.jsta1m + 1), sizeof(double)); sigma.nx = (double*) calloc((array_index.iend2n - array_index.ista2n + 1), sizeof(double)); sigma.ny = (double*) calloc((array_index.jend2n - array_index.jsta2n + 1), sizeof(double)); sigma.mx_size = (array_index.iend1m - array_index.ista1m + 1); sigma.my_size = (array_index.jend1m - array_index.jsta1m + 1); sigma.nx_size = (array_index.iend2n - array_index.ista2n + 1); sigma.ny_size = (array_index.jend2n - array_index.jsta2n + 1); int n; int itmps, itmpe, jtmps, jtmpe; double sigma_x_max, sigma_y_max; int npml = PML.NPML; double sig = PML.sigma; double order = PML.order; double dx = grid_size.dx; double dy = grid_size.dy; int nx = grid_size.X; int ny = grid_size.Y; int istam = array_index.istam; int iendm = array_index.iendm; int jstam = array_index.jstam; int jendm = array_index.jendm; int istan = array_index.istan; int iendn = array_index.iendn; int jstan = array_index.jstan; int jendn = array_index.jendn; int ista1m = 
array_index.ista1m; int jsta1m = array_index.jsta1m; int ista2n = array_index.ista2n; int jsta2n = array_index.jsta2n; int ms = array_index.ms; int me = array_index.me; int ns = array_index.ns; int ne = array_index.ne; if (PML.X0) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (istam <= npml) { itmps = max(istam, ms); itmpe = min(iendm, npml); #ifdef _DEBUG_ printf("Sigma mx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = npml - i + 1; sigma.mx[i - ista1m] = sigma_x_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.mx[i-ista1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.X1) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (nx - npml + 1 <= iendm) { itmps = max(istam, nx - npml + 1); itmpe = min(iendm, nx + me); #ifdef _DEBUG_ printf("Sigma mx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = i - nx + npml; sigma.mx[i - ista1m] = sigma_x_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.mx[i-ista1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y0) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (jstam <= npml) { jtmps = max(jstam, ms); jtmpe = min(jendm, npml); #ifdef _DEBUG_ printf("Sigma my :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = npml - j + 1; sigma.my[j - jsta1m] = sigma_y_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.my[j-jsta1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y1) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (ny - npml + 1 <= jendm) { jtmps = max(jstam, ny - npml + 1); jtmpe = min(jendm, ny + me); #ifdef _DEBUG_ printf("Sigma my :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = j - ny + npml; sigma.my[j - jsta1m] = sigma_y_max * pow(((double)n-0.5)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.my[j-jsta1m]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.X0) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (istan <= npml - 1) { itmps = max(istan, ms + ns); itmpe = min(iendn, npml - 1); #ifdef _DEBUG_ printf("Sigma nx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = npml - i; sigma.nx[i - ista2n] = sigma_x_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.nx[i-ista2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.X1) { sigma_x_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dx * (double)npml); if (nx - npml + 1 <= iendn) { itmps = max(istan, nx - npml + 1); itmpe = min(iendn, nx + me + ne + 1); #ifdef _DEBUG_ printf("Sigma nx :\t"); #endif for (int i = itmps; i <= itmpe; i++) { n = i - nx + npml; sigma.nx[i - ista2n] = sigma_x_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.nx[i-ista2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y0) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (jstan <= npml - 1) { jtmps = max(jstan, ms + ns); jtmpe = min(jendn, npml - 1); #ifdef _DEBUG_ printf("Sigma ny :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = npml - j; sigma.ny[j - jsta2n] = sigma_y_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.ny[j-jsta2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } if (PML.Y1) { sigma_y_max = -log(sig) * 0.5 * ((double)order + 1.0) / (dy * (double)npml); if (ny - npml + 1 <= 
jendn) { jtmps = max(jstan, ny - npml + 1); jtmpe = min(jendn, ny + me + ne + 1); #ifdef _DEBUG_ printf("Sigma ny :\t"); #endif for (int j = jtmps; j <= jtmpe; j++) { n = j - ny + npml; sigma.ny[j - jsta2n] = sigma_y_max * pow(((double)n)/((double)npml), order); #ifdef _DEBUG_ printf("%4.2E\t",sigma.ny[j-jsta2n]); #endif } #ifdef _DEBUG_ printf("\n"); #endif } } #ifdef _DEBUG_ printf("PML generated.\n\n"); #endif return sigma; } // init_sigma() // Set CUDA runtime flags void setCUDAflags() { CUDA_SAFE_CALL(cudaSetDeviceFlags(cudaDeviceScheduleYield)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(BubbleUpdateIndexKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(BubbleInterpolationScalarKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(BubbleInterpolationVelocityKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(BubbleRadiusKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(VoidFractionCylinderKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(VoidFractionReverseLookupKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(VFPredictionKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(VelocityKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(VelocityBoundaryKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(MixturePressureKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(MixtureBoundaryPressureKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(MixtureKMKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(BubbleHeatKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(MixtureEnergyKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(MixtureTemperatureKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(MixtureBoundaryTemperatureKernel, cudaFuncCachePreferL1)); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(MixturePropertiesKernel, cudaFuncCachePreferL1)); } // Allocate and copy variables on device memory int initialize_CUDA_variables(grid_t *grid_size, PML_t *PML, sim_params_t *sim_params, transducer_t *transducer, array_index_t *array_index, mix_params_t *mix_params, bub_params_t *bub_params) { j0m = array_index->jendm - array_index->jstam + 1; j0n = array_index->jendn - array_index->jstan + 1; i1m = array_index->iend1m - array_index->ista1m + 1; j1m = array_index->jend1m - array_index->jsta1m + 1; i1n = array_index->iend1n - array_index->ista1n + 1; j1n = array_index->jend1n - array_index->jsta1n + 1; i2m = array_index->iend2m - array_index->ista2m + 1; j2m = array_index->jend2m - array_index->jsta2m + 1; i2n = array_index->iend2n - array_index->ista2n + 1; j2n = array_index->jend2n - array_index->jsta2n + 1; m1Vol = i1m * j1m; m2Vol = i2m * j2m; v_xVol = i2n * j2m; v_yVol = i2m * j2n; E_xVol = i1n * j1m; E_yVol = i1m * j1n; cudaMalloc( (void **)&mixture_htod, sizeof(mixture_t)); cudaMallocPitch((void **)&mixture_htod.T, &pitches.T, sizeof(double)*i2m, j2m); cudaMallocPitch((void **)&mixture_htod.vx, &pitches.vx, sizeof(double)*i2n, j2m); cudaMallocPitch((void **)&mixture_htod.vy, &pitches.vy, sizeof(double)*i2m, j2n); cudaMallocPitch((void **)&mixture_htod.c_sl, &pitches.c_sl, sizeof(double)*i1m, j1m); cudaMallocPitch((void **)&mixture_htod.rho_m, &pitches.rho_m, sizeof(double)*i1m, j1m); cudaMallocPitch((void **)&mixture_htod.rho_l, &pitches.rho_l, sizeof(double)*i1m, j1m); cudaMallocPitch((void **)&mixture_htod.f_g, 
&pitches.f_g, sizeof(double)*i2m, j2m); cudaMallocPitch((void **)&mixture_htod.f_gn, &pitches.f_gn, sizeof(double)*i2m, j2m); cudaMallocPitch((void **)&mixture_htod.f_gm, &pitches.f_gm, sizeof(double)*i2m, j2m); cudaMallocPitch((void **)&mixture_htod.k_m, &pitches.k_m, sizeof(double)*i2m, j2m); cudaMallocPitch((void **)&mixture_htod.C_pm, &pitches.C_pm, sizeof(double)*i1m, j1m); cudaMallocPitch((void **)&mixture_htod.Work, &pitches.Work, sizeof(double)*i2m, j2m); cudaMallocPitch((void **)&mixture_htod.Ex, &pitches.Ex, sizeof(double)*i1n, j1m); cudaMallocPitch((void **)&mixture_htod.Ey, &pitches.Ey, sizeof(double)*i1m, j1n); cudaMallocPitch((void **)&mixture_htod.p0, &pitches.p0, sizeof(double)*i1m, j1m); cudaMallocPitch((void **)&mixture_htod.p, &pitches.p, sizeof(double2)*i1m, j1m); cudaMallocPitch((void **)&mixture_htod.pn, &pitches.pn, sizeof(double2)*i1m, j1m); #ifdef _DEBUG_ printf("T = %i\n", (int)pitches.T); printf("vx = %i\n", (int)pitches.vx); printf("vy = %i\n", (int)pitches.vy); printf("c_sl = %i\n", (int)pitches.c_sl); printf("rho_m = %i\n", (int)pitches.rho_m); printf("rho_l = %i\n", (int)pitches.rho_l); printf("f_g = %i\n", (int)pitches.f_g); printf("f_gn = %i\n", (int)pitches.f_gn); printf("f_gm = %i\n", (int)pitches.f_gm); printf("k_m = %i\n", (int)pitches.k_m); printf("C_pm = %i\n", (int)pitches.C_pm); printf("Work = %i\n", (int)pitches.Work); printf("Ex = %i\n", (int)pitches.Ex); printf("Ey = %i\n", (int)pitches.Ey); printf("p0 = %i\n", (int)pitches.p0); printf("p = %i\n", (int)pitches.p); printf("pn = %i\n", (int)pitches.pn); #endif if (bub_params->enabled) { cudaMalloc((void **)&bubbles_htod, sizeof(bubble_t)); cudaMalloc((void **)&bubbles_htod.ibm, sizeof(int2)*numBubbles); cudaMalloc((void **)&bubbles_htod.ibn, sizeof(int2)*numBubbles); cudaMalloc((void **)&bubbles_htod.pos, sizeof(double2)*numBubbles); cudaMalloc((void **)&bubbles_htod.R_t, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.R_p, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.R_pn, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.R_n, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.R_nn, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.d1_R_p, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.d1_R_n, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.PG_p, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.PG_n, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.PL_p, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.PL_n, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.PL_m, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.Q_B, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.n_B, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.dt, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.dt_n, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.re, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.re_n, sizeof(double)*numBubbles); cudaMalloc((void **)&bubbles_htod.v_B, sizeof(double2)*numBubbles); cudaMalloc((void **)&bubbles_htod.v_L, sizeof(double2)*numBubbles); } cudaMalloc((void **)&sigma_htod, sizeof(sigma_t)); cudaMalloc((void **)&sigma_htod.mx, sizeof(double)*sigma_h.mx_size); cudaMalloc((void **)&sigma_htod.my, sizeof(double)*sigma_h.my_size); cudaMalloc((void **)&sigma_htod.nx, sizeof(double)*sigma_h.nx_size); cudaMalloc((void **)&sigma_htod.ny, sizeof(double)*sigma_h.ny_size); 
cudaMalloc((void **)&grid_htod, sizeof(grid_gen)); cudaMalloc((void **)&grid_htod.xu, sizeof(double)*grid_h.xu_size); cudaMalloc((void **)&grid_htod.rxp, sizeof(double)*grid_h.rxp_size);; checkCUDAError("Memory Allocation"); cudaMemcpy2D(mixture_htod.T, pitches.T, mixture_h.T, sizeof(double)*i2m, sizeof(double)*i2m, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.p0, pitches.p0, mixture_h.p0, sizeof(double)*i1m, sizeof(double)*i1m, j1m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.p, pitches.p, mixture_h.p, sizeof(double2)*i1m, sizeof(double2)*i1m, j1m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.pn, pitches.pn, mixture_h.pn, sizeof(double2)*i1m, sizeof(double2)*i1m, j1m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.vx, pitches.vx, mixture_h.vx, sizeof(double)*i2n, sizeof(double)*i2n, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.vy, pitches.vy, mixture_h.vy, sizeof(double)*i2m, sizeof(double)*i2m, j2n, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.c_sl, pitches.c_sl, mixture_h.c_sl, sizeof(double)*i1m, sizeof(double)*i1m, j1m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.rho_m, pitches.rho_m, mixture_h.rho_m, sizeof(double)*i1m, sizeof(double)*i1m, j1m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.rho_l, pitches.rho_l, mixture_h.rho_l, sizeof(double)*i1m, sizeof(double)*i1m, j1m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.f_g, pitches.f_g, mixture_h.f_g, sizeof(double)*i2m, sizeof(double)*i2m, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.f_gn, pitches.f_gn, mixture_h.f_gn, sizeof(double)*i2m, sizeof(double)*i2m, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.f_gm, pitches.f_gm, mixture_h.f_gm, sizeof(double)*i2m, sizeof(double)*i2m, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.k_m, pitches.k_m, mixture_h.k_m, sizeof(double)*i2m, sizeof(double)*i2m, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.C_pm, pitches.C_pm, mixture_h.C_pm, sizeof(double)*i2m, sizeof(double)*i2m, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.Work, pitches.Work, mixture_h.Work, sizeof(double)*i2m, sizeof(double)*i2m, j2m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.Ex, pitches.Ex, mixture_h.Ex, sizeof(double)*i1n, sizeof(double)*i1n, j1m, cudaMemcpyHostToDevice); cudaMemcpy2D(mixture_htod.Ey, pitches.Ey, mixture_h.Ey, sizeof(double)*i1m, sizeof(double)*i1m, j1n, cudaMemcpyHostToDevice); checkCUDAError("Mixture To Device"); if (bub_params->enabled) { cudaMemcpy(bubbles_htod.ibm, bubbles_h.ibm, sizeof(int2)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.ibn, bubbles_h.ibn, sizeof(int2)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.pos, bubbles_h.pos, sizeof(double2)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.R_t, bubbles_h.R_t, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.R_p, bubbles_h.R_p, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.R_pn, bubbles_h.R_pn, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.R_n, bubbles_h.R_n, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.R_nn, bubbles_h.R_nn, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.d1_R_p, bubbles_h.d1_R_p, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.d1_R_n, bubbles_h.d1_R_n, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.PG_p, bubbles_h.PG_p, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); 
cudaMemcpy(bubbles_htod.PG_n, bubbles_h.PG_n, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.PL_p, bubbles_h.PL_p, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.PL_n, bubbles_h.PL_n, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.PL_m, bubbles_h.PL_m, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.Q_B, bubbles_h.Q_B, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.n_B, bubbles_h.n_B, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.dt, bubbles_h.dt, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.dt_n, bubbles_h.dt_n, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.re, bubbles_h.re, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.re_n, bubbles_h.re_n, sizeof(double)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.v_B, bubbles_h.v_B, sizeof(double2)*numBubbles, cudaMemcpyHostToDevice); cudaMemcpy(bubbles_htod.v_L, bubbles_h.v_L, sizeof(double2)*numBubbles, cudaMemcpyHostToDevice); } // cudaMemcpy(&sigma_htod, &sigma_h, // sizeof(sigma_t), cudaMemcpyHostToDevice); cudaMemcpy(sigma_htod.mx, sigma_h.mx, sizeof(double)*sigma_h.mx_size, cudaMemcpyHostToDevice); cudaMemcpy(sigma_htod.my, sigma_h.my, sizeof(double)*sigma_h.my_size, cudaMemcpyHostToDevice); cudaMemcpy(sigma_htod.nx, sigma_h.nx, sizeof(double)*sigma_h.nx_size, cudaMemcpyHostToDevice); cudaMemcpy(sigma_htod.ny, sigma_h.ny, sizeof(double)*sigma_h.ny_size, cudaMemcpyHostToDevice); checkCUDAError("Sigma To Device"); // cudaMemcpy(&grid_htod, &grid_h, // sizeof(grid_gen), cudaMemcpyHostToDevice); cudaMemcpy(grid_htod.xu, grid_h.xu, sizeof(double)*grid_h.xu_size, cudaMemcpyHostToDevice); cudaMemcpy(grid_htod.rxp, grid_h.rxp, sizeof(double)*grid_h.rxp_size, cudaMemcpyHostToDevice); checkCUDAError("Grid To Device"); // Throw constants into cache double3 tmp; tmp.x = 1.0 / ((double)sim_params->deltaBand); tmp.y = 2.0 * acos(-1.0) / ((double)sim_params->deltaBand) * grid_size->rdx; tmp.z = 2.0 * acos(-1.0) / ((double)sim_params->deltaBand) * grid_size->rdy; cudaMemcpyToSymbol(mixture_c, &mixture_htod, sizeof(mixture_t)); cudaMemcpyToSymbol(sigma_c, &sigma_htod, sizeof(sigma_t)); cudaMemcpyToSymbol(gridgen_c, &grid_htod, sizeof(grid_gen)); cudaMemcpyToSymbol(Pi, &Pi_h, sizeof(double)); cudaMemcpyToSymbol(Pi4r3, &Pi4r3_h, sizeof(double)); cudaMemcpyToSymbol(delta_coef, &tmp, sizeof(double3)); cudaMemcpyToSymbol(array_c, array_index, sizeof(array_index_t)); cudaMemcpyToSymbol(grid_c, grid_size, sizeof(grid_t)); cudaMemcpyToSymbol(sim_params_c, sim_params, sizeof(sim_params_t)); cudaMemcpyToSymbol(transducer_c, transducer, sizeof(transducer_t)); cudaMemcpyToSymbol(mix_params_c, mix_params, sizeof(mix_params_t)); cudaMemcpyToSymbol(PML_c, PML, sizeof(PML_t)); if (bub_params->enabled) { cudaMemcpyToSymbol(bubbles_c, &bubbles_htod, sizeof(bubble_t)); cudaMemcpyToSymbol(bub_params_c, bub_params, sizeof(bub_params_t)); cudaMemcpyToSymbol(num_bubbles, &numBubbles, sizeof(int)); } checkCUDAError("Constant Memory Cache"); // Determine the required CUDA parameters widths.T = pitches.T / sizeof(double); widths.P = pitches.P / sizeof(double); widths.p0 = pitches.p0 / sizeof(double); widths.p = pitches.p / sizeof(double2); widths.pn = pitches.pn / sizeof(double2); widths.vx = pitches.vx / sizeof(double); widths.vy = pitches.vy / sizeof(double); widths.c_sl = pitches.c_sl / sizeof(double); 
widths.rho_m = pitches.rho_m / sizeof(double); widths.rho_l = pitches.rho_l / sizeof(double); widths.f_g = pitches.f_g / sizeof(double); widths.f_gn = pitches.f_gn / sizeof(double); widths.f_gm = pitches.f_gm / sizeof(double); widths.k_m = pitches.k_m / sizeof(double); widths.C_pm = pitches.C_pm / sizeof(double); widths.Work = pitches.Work / sizeof(double); widths.Ex = pitches.Ex / sizeof(double); widths.Ey = pitches.Ey / sizeof(double); return 0; } // Free all CUDA variables int destroy_CUDA_variables(bub_params_t *bub_params) { cudaFree(mixture_htod.T); cudaFree(mixture_htod.vx); cudaFree(mixture_htod.vy); cudaFree(mixture_htod.c_sl); cudaFree(mixture_htod.rho_m); cudaFree(mixture_htod.rho_l); cudaFree(mixture_htod.f_g); cudaFree(mixture_htod.f_gn); cudaFree(mixture_htod.f_gm); cudaFree(mixture_htod.k_m); cudaFree(mixture_htod.C_pm); cudaFree(mixture_htod.Work); cudaFree(mixture_htod.Ex); cudaFree(mixture_htod.Ey); cudaFree(mixture_htod.p0); cudaFree(mixture_htod.p); cudaFree(mixture_htod.pn); cudaFreeHost(mixture_h.T); cudaFreeHost(mixture_h.vx); cudaFreeHost(mixture_h.vy); cudaFreeHost(mixture_h.c_sl); cudaFreeHost(mixture_h.rho_m); cudaFreeHost(mixture_h.rho_l); cudaFreeHost(mixture_h.f_g); cudaFreeHost(mixture_h.f_gn); cudaFreeHost(mixture_h.f_gm); cudaFreeHost(mixture_h.k_m); cudaFreeHost(mixture_h.C_pm); cudaFreeHost(mixture_h.Work); cudaFreeHost(mixture_h.Ex); cudaFreeHost(mixture_h.Ey); cudaFreeHost(mixture_h.p0); cudaFreeHost(mixture_h.p); cudaFreeHost(mixture_h.pn); if (bub_params->enabled) { cudaFree(bubbles_htod.ibm); cudaFree(bubbles_htod.ibn); cudaFree(bubbles_htod.pos); cudaFree(bubbles_htod.R_t); cudaFree(bubbles_htod.R_p); cudaFree(bubbles_htod.R_pn); cudaFree(bubbles_htod.R_n); cudaFree(bubbles_htod.R_nn); cudaFree(bubbles_htod.d1_R_p); cudaFree(bubbles_htod.d1_R_n); cudaFree(bubbles_htod.PG_p); cudaFree(bubbles_htod.PG_n); cudaFree(bubbles_htod.PL_p); cudaFree(bubbles_htod.PL_n); cudaFree(bubbles_htod.PL_m); cudaFree(bubbles_htod.Q_B); cudaFree(bubbles_htod.n_B); cudaFree(bubbles_htod.dt); cudaFree(bubbles_htod.dt_n); cudaFree(bubbles_htod.re); cudaFree(bubbles_htod.re_n); cudaFree(bubbles_htod.v_B); cudaFree(bubbles_htod.v_L); cudaFreeHost(bubbles_h.ibm); cudaFreeHost(bubbles_h.ibn); cudaFreeHost(bubbles_h.pos); cudaFreeHost(bubbles_h.R_t); cudaFreeHost(bubbles_h.R_p); cudaFreeHost(bubbles_h.R_pn); cudaFreeHost(bubbles_h.R_n); cudaFreeHost(bubbles_h.R_nn); cudaFreeHost(bubbles_h.d1_R_p); cudaFreeHost(bubbles_h.d1_R_n); cudaFreeHost(bubbles_h.PG_p); cudaFreeHost(bubbles_h.PG_n); cudaFreeHost(bubbles_h.PL_p); cudaFreeHost(bubbles_h.PL_n); cudaFreeHost(bubbles_h.PL_m); cudaFreeHost(bubbles_h.Q_B); cudaFreeHost(bubbles_h.n_B); cudaFreeHost(bubbles_h.dt); cudaFreeHost(bubbles_h.dt_n); cudaFreeHost(bubbles_h.re); cudaFreeHost(bubbles_h.re_n); cudaFreeHost(bubbles_h.v_B); cudaFreeHost(bubbles_h.v_L); } cudaFree(sigma_htod.mx); cudaFree(sigma_htod.my); cudaFree(sigma_htod.nx); cudaFree(sigma_htod.ny); cudaFree(grid_htod.xu); cudaFree(grid_htod.rxp); checkCUDAError("Memory Allocation"); return 0; } // Checks for any CUDA runtime errors void checkCUDAError( const char *msg) { #ifdef _DEBUG_ cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } return; #else return; #endif }
55fc8ec29b11c4c73813f30eddaeb37dd6d564a7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "affine_2d_cuda_kernel.cuh"

using namespace at;

void Affine2DForwardCUDAKernelLauncher(
    const at::Tensor features, const at::Tensor rois, const float spatial_scale,
    const int sampling_ratio, const bool aligned, const int order,
    const int channels, const int height, const int width, const int num_rois,
    const int pooled_height, const int pooled_width, at::Tensor output) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.type(), "Affine2DLaucherForward", ([&] {
        const scalar_t *bottom_data = features.contiguous().data<scalar_t>();
        const scalar_t *rois_data = rois.contiguous().data<scalar_t>();
        scalar_t *top_data = output.contiguous().data<scalar_t>();

        hipLaunchKernelGGL(( affine_2d_forward_cuda_kernel<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
            output_size, bottom_data, rois_data, scalar_t(spatial_scale),
            sampling_ratio, aligned, order, channels, height, width,
            pooled_height, pooled_width, top_data);
      }));
  AT_CUDA_CHECK(hipGetLastError());
}
55fc8ec29b11c4c73813f30eddaeb37dd6d564a7.cu
#include "affine_2d_cuda_kernel.cuh" using namespace at; void Affine2DForwardCUDAKernelLauncher( const at::Tensor features, const at::Tensor rois, const float spatial_scale, const int sampling_ratio, const bool aligned, const int order, const int channels, const int height, const int width, const int num_rois, const int pooled_height, const int pooled_width, at::Tensor output) { const int output_size = num_rois * pooled_height * pooled_width * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "Affine2DLaucherForward", ([&] { const scalar_t *bottom_data = features.contiguous().data<scalar_t>(); const scalar_t *rois_data = rois.contiguous().data<scalar_t>(); scalar_t *top_data = output.contiguous().data<scalar_t>(); affine_2d_forward_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_data, rois_data, scalar_t(spatial_scale), sampling_ratio, aligned, order, channels, height, width, pooled_height, pooled_width, top_data); })); AT_CUDA_CHECK(cudaGetLastError()); }
a02dcce59fd59709ed3e0881d3da7a89f684ed2e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <mpi.h> #define GPU_DIRECT_MPI int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); int mpiSize = 0; int mpiRank = 0; MPI_Status mpiStatus; MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); if (mpiRank == 0) { std::cout << "Running on " << mpiSize << " Processes." << std::endl; } std::cout << "In Process [" << mpiRank << "/" << mpiSize << "]" << std::endl; const int numCommunications = 10; const int bufferSize = 256 * 1024 * 1024; // 1GB data float *sendBuffer; float *recvBuffer; int numDevices = 0; hipGetDeviceCount(&numDevices); hipSetDevice(numDevices - 1); #ifndef GPU_DIRECT_MPI float* cpuSendBuffer; float* cpuRecvBuffer; #endif float *gpuSendBuffer; float *gpuRecvBuffer; hipMalloc(&gpuSendBuffer, sizeof(float) * bufferSize); hipMalloc(&gpuRecvBuffer, sizeof(float) * bufferSize); for (int i = 0; i < numCommunications; ++i) { #ifndef GPU_DIRECT_MPI cpuSendBuffer = (float*) malloc(sizeof(float) * bufferSize); cpuRecvBuffer = (float*) malloc(sizeof(float) * bufferSize); hipMemcpy(cpuSendBuffer, gpuSendBuffer, sizeof(float) * bufferSize, hipMemcpyDeviceToHost); sendBuffer = cpuSendBuffer; recvBuffer = cpuRecvBuffer; #else sendBuffer = gpuSendBuffer; recvBuffer = gpuRecvBuffer; #endif int left, right; right = (mpiRank + 1) % mpiSize; left = mpiRank - 1; if (left < 0) { left = mpiSize - 1; } /* std::cout << "MPI_Sendrecv Arguments:" << std::endl << "left: " << left << " right: " << right << " sendBuffer: " << sendBuffer << " recvBuffer: " << recvBuffer << std::endl; */ MPI_Sendrecv(sendBuffer, bufferSize, MPI_FLOAT, left, 123, recvBuffer, bufferSize, MPI_FLOAT, right, 123, MPI_COMM_WORLD, &mpiStatus); #ifndef GPU_DIRECT_MPI hipMemcpy(cpuRecvBuffer, gpuRecvBuffer, sizeof(float) * bufferSize, hipMemcpyDeviceToHost); #endif } hipFree(gpuSendBuffer); hipFree(gpuRecvBuffer); #ifndef GPU_DIRECT_MPI free(cpuSendBuffer); free(cpuRecvBuffer); #endif MPI_Finalize(); return 0; }
a02dcce59fd59709ed3e0881d3da7a89f684ed2e.cu
#include <iostream> #include <mpi.h> #define GPU_DIRECT_MPI int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); int mpiSize = 0; int mpiRank = 0; MPI_Status mpiStatus; MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); if (mpiRank == 0) { std::cout << "Running on " << mpiSize << " Processes." << std::endl; } std::cout << "In Process [" << mpiRank << "/" << mpiSize << "]" << std::endl; const int numCommunications = 10; const int bufferSize = 256 * 1024 * 1024; // 1GB data float *sendBuffer; float *recvBuffer; int numDevices = 0; cudaGetDeviceCount(&numDevices); cudaSetDevice(numDevices - 1); #ifndef GPU_DIRECT_MPI float* cpuSendBuffer; float* cpuRecvBuffer; #endif float *gpuSendBuffer; float *gpuRecvBuffer; cudaMalloc(&gpuSendBuffer, sizeof(float) * bufferSize); cudaMalloc(&gpuRecvBuffer, sizeof(float) * bufferSize); for (int i = 0; i < numCommunications; ++i) { #ifndef GPU_DIRECT_MPI cpuSendBuffer = (float*) malloc(sizeof(float) * bufferSize); cpuRecvBuffer = (float*) malloc(sizeof(float) * bufferSize); cudaMemcpy(cpuSendBuffer, gpuSendBuffer, sizeof(float) * bufferSize, cudaMemcpyDeviceToHost); sendBuffer = cpuSendBuffer; recvBuffer = cpuRecvBuffer; #else sendBuffer = gpuSendBuffer; recvBuffer = gpuRecvBuffer; #endif int left, right; right = (mpiRank + 1) % mpiSize; left = mpiRank - 1; if (left < 0) { left = mpiSize - 1; } /* std::cout << "MPI_Sendrecv Arguments:" << std::endl << "left: " << left << " right: " << right << " sendBuffer: " << sendBuffer << " recvBuffer: " << recvBuffer << std::endl; */ MPI_Sendrecv(sendBuffer, bufferSize, MPI_FLOAT, left, 123, recvBuffer, bufferSize, MPI_FLOAT, right, 123, MPI_COMM_WORLD, &mpiStatus); #ifndef GPU_DIRECT_MPI cudaMemcpy(cpuRecvBuffer, gpuRecvBuffer, sizeof(float) * bufferSize, cudaMemcpyDeviceToHost); #endif } cudaFree(gpuSendBuffer); cudaFree(gpuRecvBuffer); #ifndef GPU_DIRECT_MPI free(cpuSendBuffer); free(cpuRecvBuffer); #endif MPI_Finalize(); return 0; }
6542176697d6057f1e2cd0118a7a8ec9f4651bbd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ctranslate2/ops/mean.h" #include <hipcub/hipcub.hpp> #include "type_dispatch.h" #include "cuda/helpers.h" namespace ctranslate2 { namespace ops { constexpr dim_t num_threads = 256; template <typename T, typename AccumT> __global__ void mean_kernel(const T* input, const cuda::index_t outer_size, const cuda::index_t axis_size, const cuda::index_t inner_size, T* output) { typedef hipcub::BlockReduce<AccumT, num_threads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; const cuda::index_t i = blockIdx.x / inner_size; const cuda::index_t j = blockIdx.x % inner_size; AccumT thread_sum = 0; for (cuda::index_t k = threadIdx.x; k < axis_size; k += blockDim.x) { thread_sum += AccumT(input[i * axis_size * inner_size + k * inner_size + j]); } AccumT sum = BlockReduce(temp_storage).Sum(thread_sum); if (threadIdx.x == 0) { output[blockIdx.x] = sum / AccumT(axis_size); } } template <Device D, typename T> void Mean::compute(const StorageView& input, const dim_t outer_size, const dim_t axis_size, const dim_t inner_size, StorageView& output) const { const dim_t blocks = ::min(outer_size * inner_size, cuda::max_blocks); hipLaunchKernelGGL(( mean_kernel<cuda::device_type<T>, float>), dim3(blocks), dim3(num_threads), 0, cuda::get_cuda_stream(), cuda::device_cast(input.data<T>()), outer_size, axis_size, inner_size, cuda::device_cast(output.data<T>())); } #define DECLARE_IMPL(T) \ template void \ Mean::compute<Device::CUDA, T>(const StorageView& input, \ const dim_t outer_size, \ const dim_t axis_size, \ const dim_t inner_size, \ StorageView& output) const; DECLARE_IMPL(float) DECLARE_IMPL(float16_t) DECLARE_IMPL(bfloat16_t) } }
6542176697d6057f1e2cd0118a7a8ec9f4651bbd.cu
#include "ctranslate2/ops/mean.h" #include <cub/block/block_reduce.cuh> #include "type_dispatch.h" #include "cuda/helpers.h" namespace ctranslate2 { namespace ops { constexpr dim_t num_threads = 256; template <typename T, typename AccumT> __global__ void mean_kernel(const T* input, const cuda::index_t outer_size, const cuda::index_t axis_size, const cuda::index_t inner_size, T* output) { typedef cub::BlockReduce<AccumT, num_threads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; const cuda::index_t i = blockIdx.x / inner_size; const cuda::index_t j = blockIdx.x % inner_size; AccumT thread_sum = 0; for (cuda::index_t k = threadIdx.x; k < axis_size; k += blockDim.x) { thread_sum += AccumT(input[i * axis_size * inner_size + k * inner_size + j]); } AccumT sum = BlockReduce(temp_storage).Sum(thread_sum); if (threadIdx.x == 0) { output[blockIdx.x] = sum / AccumT(axis_size); } } template <Device D, typename T> void Mean::compute(const StorageView& input, const dim_t outer_size, const dim_t axis_size, const dim_t inner_size, StorageView& output) const { const dim_t blocks = std::min(outer_size * inner_size, cuda::max_blocks); mean_kernel<cuda::device_type<T>, float><<<blocks, num_threads, 0, cuda::get_cuda_stream()>>>( cuda::device_cast(input.data<T>()), outer_size, axis_size, inner_size, cuda::device_cast(output.data<T>())); } #define DECLARE_IMPL(T) \ template void \ Mean::compute<Device::CUDA, T>(const StorageView& input, \ const dim_t outer_size, \ const dim_t axis_size, \ const dim_t inner_size, \ StorageView& output) const; DECLARE_IMPL(float) DECLARE_IMPL(float16_t) DECLARE_IMPL(bfloat16_t) } }
ad0133c40840820a66fe6c00445f51b111eb96d9.hip
// !!! This is a file automatically generated by hipify!!!
// ASSIGNMENT 7 - CUDA PROGRAMMING
// Ana Julia de Oliveira Bellini - RA 111.774
// Willian Dihanster Gomes de Oliveira - RA 112.269
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>

#define tam 1.0
#define dx 0.00001
#define dt 0.000001
#define T 0.01
#define kappa 0.000045
#define THREADS_PER_BLOCK 512

typedef long long int nint;

/* Explicit finite-difference update of the 1D heat equation on the device */
__global__ void calcular(double *u, double *prev, double k, double t, double x, int n)
{
    nint i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i > 0 && i < (n - 1))
        u[i] = prev[i] + k * t / (x * x) * (prev[i-1] - 2 * prev[i] + prev[i+1]);
}

/* Boundary condition: both ends held at zero */
__global__ void contorno(double *u, int n)
{
    nint i = blockDim.x * blockIdx.x + threadIdx.x;

    if(i == 1)
        u[0] = u[n] = 0.0;
}

int main(void)
{
    double *tmp, *u, *u_prev;
    double x = dx, t;
    nint i, n, maxloc, size, num_blocos;
    double *d_u, *d_u_prev;

    /* Compute the number of grid points */
    n = tam / dx;
    /* Integer ceiling of (n + 1) / THREADS_PER_BLOCK, so every point gets a thread */
    num_blocos = (n + THREADS_PER_BLOCK) / THREADS_PER_BLOCK;

    /* Allocate vectors (the buffers hold doubles) */
    u = (double *) malloc((n + 1) * sizeof(double));
    u_prev = (double *) malloc((n + 1) * sizeof(double));
    size = (n + 1) * sizeof(double);
    hipMalloc((void **) &d_u, size);
    hipMalloc((void **) &d_u_prev, size);

    printf("Inicio: qtde=%ld, dt=%g, dx=%g, dx²=%g, kappa=%f, const=%f\n", (n+1), dt, dx, dx*dx, kappa, kappa*dt/(dx*dx));
    printf("Iteracoes previstas: %g\n", T/dt);

    /* Initial condition: triangular temperature profile */
    x = 0;
    for (i = 0; i < n + 1; i++) {
        if (x <= 0.5)
            u_prev[i] = 200 * x;
        else
            u_prev[i] = 200 * (1.-x);
        x += dx;
    }

    printf("dx=%g, x=%g, x-dx=%g\n", dx, x, x-dx);
    printf("u_prev[0,1]=%g, %g\n",u_prev[0],u_prev[1]);
    printf("u_prev[n-1,n]=%g, %g\n",u_prev[n-1],u_prev[n]);

    hipMemcpy(d_u_prev, u_prev, size, hipMemcpyHostToDevice);

    t = 0.;
    while (t < T) {
        x = dx;

        /* Launch the update kernel on the device */
        hipLaunchKernelGGL(( calcular) , dim3(num_blocos), dim3(THREADS_PER_BLOCK), 0, 0, d_u, d_u_prev, kappa, dt, dx, n + 1);

        /* Wait for all threads to finish their computations */
        hipDeviceSynchronize();

        /* Now apply the boundary condition */
        hipLaunchKernelGGL(( contorno) , dim3(num_blocos), dim3(THREADS_PER_BLOCK), 0, 0, d_u, n);

        /* Swap the pointers */
        tmp = d_u_prev;
        d_u_prev = d_u;
        d_u = tmp;

        t += dt;
    }

    hipMemcpy(u, d_u, size, hipMemcpyDeviceToHost);

    /* Find the largest value and its location */
    maxloc = 0;
    for (i = 1; i < n + 1; i++)
        if (u[i] > u[maxloc]) maxloc = i;

    printf("Maior valor u[%ld] = %g\n", maxloc, u[maxloc]);

    /* Free host memory */
    free(u);
    free(u_prev);
    u = NULL;
    u_prev = NULL;

    /* Free device memory */
    hipFree(d_u);
    hipFree(d_u_prev);

    return 0;
}
ad0133c40840820a66fe6c00445f51b111eb96d9.cu
// ASSIGNMENT 7 - CUDA PROGRAMMING
// Ana Julia de Oliveira Bellini - RA 111.774
// Willian Dihanster Gomes de Oliveira - RA 112.269
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>

#define tam 1.0
#define dx 0.00001
#define dt 0.000001
#define T 0.01
#define kappa 0.000045
#define THREADS_PER_BLOCK 512

typedef long long int nint;

/* Explicit finite-difference update of the 1D heat equation on the device */
__global__ void calcular(double *u, double *prev, double k, double t, double x, int n)
{
    nint i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i > 0 && i < (n - 1))
        u[i] = prev[i] + k * t / (x * x) * (prev[i-1] - 2 * prev[i] + prev[i+1]);
}

/* Boundary condition: both ends held at zero */
__global__ void contorno(double *u, int n)
{
    nint i = blockDim.x * blockIdx.x + threadIdx.x;

    if(i == 1)
        u[0] = u[n] = 0.0;
}

int main(void)
{
    double *tmp, *u, *u_prev;
    double x = dx, t;
    nint i, n, maxloc, size, num_blocos;
    double *d_u, *d_u_prev;

    /* Compute the number of grid points */
    n = tam / dx;
    /* Integer ceiling of (n + 1) / THREADS_PER_BLOCK, so every point gets a thread */
    num_blocos = (n + THREADS_PER_BLOCK) / THREADS_PER_BLOCK;

    /* Allocate vectors (the buffers hold doubles) */
    u = (double *) malloc((n + 1) * sizeof(double));
    u_prev = (double *) malloc((n + 1) * sizeof(double));
    size = (n + 1) * sizeof(double);
    cudaMalloc((void **) &d_u, size);
    cudaMalloc((void **) &d_u_prev, size);

    printf("Inicio: qtde=%ld, dt=%g, dx=%g, dx²=%g, kappa=%f, const=%f\n", (n+1), dt, dx, dx*dx, kappa, kappa*dt/(dx*dx));
    printf("Iteracoes previstas: %g\n", T/dt);

    /* Initial condition: triangular temperature profile */
    x = 0;
    for (i = 0; i < n + 1; i++) {
        if (x <= 0.5)
            u_prev[i] = 200 * x;
        else
            u_prev[i] = 200 * (1.-x);
        x += dx;
    }

    printf("dx=%g, x=%g, x-dx=%g\n", dx, x, x-dx);
    printf("u_prev[0,1]=%g, %g\n",u_prev[0],u_prev[1]);
    printf("u_prev[n-1,n]=%g, %g\n",u_prev[n-1],u_prev[n]);

    cudaMemcpy(d_u_prev, u_prev, size, cudaMemcpyHostToDevice);

    t = 0.;
    while (t < T) {
        x = dx;

        /* Launch the update kernel on the device */
        calcular <<<num_blocos, THREADS_PER_BLOCK>>> (d_u, d_u_prev, kappa, dt, dx, n + 1);

        /* Wait for all threads to finish their computations */
        cudaDeviceSynchronize();

        /* Now apply the boundary condition */
        contorno <<<num_blocos, THREADS_PER_BLOCK>>> (d_u, n);

        /* Swap the pointers */
        tmp = d_u_prev;
        d_u_prev = d_u;
        d_u = tmp;

        t += dt;
    }

    cudaMemcpy(u, d_u, size, cudaMemcpyDeviceToHost);

    /* Find the largest value and its location */
    maxloc = 0;
    for (i = 1; i < n + 1; i++)
        if (u[i] > u[maxloc]) maxloc = i;

    printf("Maior valor u[%ld] = %g\n", maxloc, u[maxloc]);

    /* Free host memory */
    free(u);
    free(u_prev);
    u = NULL;
    u_prev = NULL;

    /* Free device memory */
    cudaFree(d_u);
    cudaFree(d_u_prev);

    return 0;
}
78dacda874c1f0fcec8256cb86a893a2d03af931.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * Changes in this code has been editted to render gray levels only * and NOT RGB. */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_math.h> bool mallocVolumeArray = false; typedef unsigned int uint; typedef unsigned char uchar; hipArray *d_volumeArray = 0; hipStream_t renderStream; texture<float, 3, hipReadModeElementType> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_render(float *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { const int maxSteps = 500; const float tstep = 0.01f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color __shared__ float sum[256]; __shared__ float subtractValue[256]; __shared__ float opacThreshold[256]; float t = tnear; int thrIdx = threadIdx.x; sum[thrIdx] = 0; subtractValue[thrIdx] = 0; opacThreshold[thrIdx] = 0.90f; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] 
coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); sample *= 0.2f; if (sum[thrIdx]>0.0f) { subtractValue[thrIdx] += 0.01f; opacThreshold[thrIdx] -= 0.02f; } if (sum[thrIdx]==0.0f && sample > voxelThreshold) { sum[thrIdx] += sample; } else if (sum[threadIdx.x]>0.0f && sample - subtractValue[thrIdx] > 0.0f) { sum[thrIdx] += sample - subtractValue[thrIdx]; } if (sum[thrIdx] >= opacThreshold[thrIdx]) break; t += tstep; if (t > tfar) break; pos += step; } d_output[y*imageW + x] = sum[thrIdx]; } /*************************************************************************************************************************/ /*************************************** END OF KERNELS ***************************************************************/ /*************************************************************************************************************************/ //Initialization for MemcpyDeviceToDevice, for Processing AND Volume Rendering extern "C" void initRayCastCuda(void *d_volume, hipExtent volumeSize, hipMemcpyKind memcpyKind) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); if (!mallocVolumeArray) { hipStreamCreate(&renderStream); hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize); mallocVolumeArray = true; } // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(d_volume, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = memcpyKind; hipMemcpy3D(&copyParams); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = hipFilterModeLinear; // linear interpolation tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture hipBindTextureToArray(tex, d_volumeArray, channelDesc); } extern "C" void freeVolumeBuffers() { hipFreeArray(d_volumeArray); mallocVolumeArray = false; } extern "C" void rayCast_kernel(dim3 gridSize, dim3 blockSize, float *d_output, int imageW, int imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, renderStream, d_output, imageW, imageH, density, brightness, transferOffset, transferScale, voxelThreshold); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
78dacda874c1f0fcec8256cb86a893a2d03af931.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * Changes in this code has been editted to render gray levels only * and NOT RGB. */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_math.h> bool mallocVolumeArray = false; typedef unsigned int uint; typedef unsigned char uchar; cudaArray *d_volumeArray = 0; cudaStream_t renderStream; texture<float, 3, cudaReadModeElementType> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_render(float *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { const int maxSteps = 500; const float tstep = 0.01f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color __shared__ float sum[256]; __shared__ float subtractValue[256]; __shared__ float opacThreshold[256]; float t = tnear; int thrIdx = threadIdx.x; sum[thrIdx] = 0; subtractValue[thrIdx] = 0; opacThreshold[thrIdx] = 0.90f; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, 
pos.z*0.5f+0.5f); sample *= 0.2f; if (sum[thrIdx]>0.0f) { subtractValue[thrIdx] += 0.01f; opacThreshold[thrIdx] -= 0.02f; } if (sum[thrIdx]==0.0f && sample > voxelThreshold) { sum[thrIdx] += sample; } else if (sum[threadIdx.x]>0.0f && sample - subtractValue[thrIdx] > 0.0f) { sum[thrIdx] += sample - subtractValue[thrIdx]; } if (sum[thrIdx] >= opacThreshold[thrIdx]) break; t += tstep; if (t > tfar) break; pos += step; } d_output[y*imageW + x] = sum[thrIdx]; } /*************************************************************************************************************************/ /*************************************** END OF KERNELS ***************************************************************/ /*************************************************************************************************************************/ //Initialization for MemcpyDeviceToDevice, for Processing AND Volume Rendering extern "C" void initRayCastCuda(void *d_volume, cudaExtent volumeSize, cudaMemcpyKind memcpyKind) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); if (!mallocVolumeArray) { cudaStreamCreate(&renderStream); cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize); mallocVolumeArray = true; } // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(d_volume, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = memcpyKind; cudaMemcpy3D(&copyParams); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = cudaFilterModeLinear; // linear interpolation tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture cudaBindTextureToArray(tex, d_volumeArray, channelDesc); } extern "C" void freeVolumeBuffers() { cudaFreeArray(d_volumeArray); mallocVolumeArray = false; } extern "C" void rayCast_kernel(dim3 gridSize, dim3 blockSize, float *d_output, int imageW, int imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { d_render<<<gridSize, blockSize, 0, renderStream>>>( d_output, imageW, imageH, density, brightness, transferOffset, transferScale, voxelThreshold); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
58628420f685cc9efcb21807cf82761aeb2cdbd1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "elementwise_1D_1D_add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in1 = NULL; hipMalloc(&in1, XSIZE*YSIZE); float *in2 = NULL; hipMalloc(&in2, XSIZE*YSIZE); float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( elementwise_1D_1D_add), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,in2,out,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( elementwise_1D_1D_add), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,in2,out,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( elementwise_1D_1D_add), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,in2,out,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
58628420f685cc9efcb21807cf82761aeb2cdbd1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "elementwise_1D_1D_add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in1 = NULL; cudaMalloc(&in1, XSIZE*YSIZE); float *in2 = NULL; cudaMalloc(&in2, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); elementwise_1D_1D_add<<<gridBlock,threadBlock>>>(in1,in2,out,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { elementwise_1D_1D_add<<<gridBlock,threadBlock>>>(in1,in2,out,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { elementwise_1D_1D_add<<<gridBlock,threadBlock>>>(in1,in2,out,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0ddf7af762cd5cc2922d8b630f9c605c984d1e15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> //void test(const double *ourplay_x, const double *ourplay_y, const double *ourplay_dir, const double *ourplay_vx, const double *ourplay_vy, const double *ourplay_vr, unsigned int size); void test(const double *ourplay_x, const double *ourplay_y, const double *ourplay_dir, const double *ourplay_vx, const double *ourplay_vy, const double *ourplay_vr, unsigned int size) { double *dev_ourplay_x; double *dev_ourplay_y; double *dev_ourplay_dir; double *dev_ourplay_vx; double *dev_ourplay_vy; double *dev_ourplay_vr; hipSetDevice(0); hipMalloc((void**)&dev_ourplay_x, size * sizeof(double)); hipMalloc((void**)&dev_ourplay_y, size * sizeof(double)); hipMalloc((void**)&dev_ourplay_dir, size * sizeof(double)); hipMalloc((void**)&dev_ourplay_vx, size * sizeof(double)); hipMalloc((void**)&dev_ourplay_vy, size * sizeof(double)); hipMalloc((void**)&dev_ourplay_vr, size * sizeof(double)); hipMemcpy(dev_ourplay_x, ourplay_x, size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_ourplay_y, ourplay_y, size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_ourplay_dir, ourplay_dir, size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_ourplay_vx, ourplay_vx, size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_ourplay_vy, ourplay_vy, size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_ourplay_vr, ourplay_vr, size * sizeof(double), hipMemcpyHostToDevice); //do nothing hipFree(dev_ourplay_x); hipFree(dev_ourplay_y); hipFree(dev_ourplay_dir); hipFree(dev_ourplay_vx); hipFree(dev_ourplay_vy); hipFree(dev_ourplay_vr); } void main() { const unsigned int num = 12; double ourplay_x[num] = { 0 }; double ourplay_y[num] = { 0 }; double ourplay_dir[num] = { 0 }; double ourplay_vx[num] = { 0 }; double ourplay_vy[num] = { 0 }; double ourplay_vr[num] = { 0 }; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); test(ourplay_x, ourplay_y, ourplay_dir, ourplay_vx, ourplay_vy, ourplay_vr, num); hipEventRecord(stop, 0); //confirm that all things have been done before "stop event" hipEventSynchronize(stop); float elapseTime; hipEventElapsedTime(&elapseTime, start, stop); printf("Time for I/O : %.5f ms\n", elapseTime); system("pause"); }
0ddf7af762cd5cc2922d8b630f9c605c984d1e15.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> //void test(const double *ourplay_x, const double *ourplay_y, const double *ourplay_dir, const double *ourplay_vx, const double *ourplay_vy, const double *ourplay_vr, unsigned int size); void test(const double *ourplay_x, const double *ourplay_y, const double *ourplay_dir, const double *ourplay_vx, const double *ourplay_vy, const double *ourplay_vr, unsigned int size) { double *dev_ourplay_x; double *dev_ourplay_y; double *dev_ourplay_dir; double *dev_ourplay_vx; double *dev_ourplay_vy; double *dev_ourplay_vr; cudaSetDevice(0); cudaMalloc((void**)&dev_ourplay_x, size * sizeof(double)); cudaMalloc((void**)&dev_ourplay_y, size * sizeof(double)); cudaMalloc((void**)&dev_ourplay_dir, size * sizeof(double)); cudaMalloc((void**)&dev_ourplay_vx, size * sizeof(double)); cudaMalloc((void**)&dev_ourplay_vy, size * sizeof(double)); cudaMalloc((void**)&dev_ourplay_vr, size * sizeof(double)); cudaMemcpy(dev_ourplay_x, ourplay_x, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_ourplay_y, ourplay_y, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_ourplay_dir, ourplay_dir, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_ourplay_vx, ourplay_vx, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_ourplay_vy, ourplay_vy, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_ourplay_vr, ourplay_vr, size * sizeof(double), cudaMemcpyHostToDevice); //do nothing cudaFree(dev_ourplay_x); cudaFree(dev_ourplay_y); cudaFree(dev_ourplay_dir); cudaFree(dev_ourplay_vx); cudaFree(dev_ourplay_vy); cudaFree(dev_ourplay_vr); } void main() { const unsigned int num = 12; double ourplay_x[num] = { 0 }; double ourplay_y[num] = { 0 }; double ourplay_dir[num] = { 0 }; double ourplay_vx[num] = { 0 }; double ourplay_vy[num] = { 0 }; double ourplay_vr[num] = { 0 }; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); test(ourplay_x, ourplay_y, ourplay_dir, ourplay_vx, ourplay_vy, ourplay_vr, num); cudaEventRecord(stop, 0); //confirm that all things have been done before "stop event" cudaEventSynchronize(stop); float elapseTime; cudaEventElapsedTime(&elapseTime, start, stop); printf("Time for I/O : %.5f ms\n", elapseTime); system("pause"); }
0bfd59ffab1d9211e88669a61bb7d10c775006b3.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <benchmark/benchmark.h> #include <vector> #include "dali/benchmark/dali_bench.h" #include "dali/kernels/slice/slice_gpu.cuh" #include "dali/test/tensor_test_utils.h" #include "dali/test/test_tensors.h" #include "dali/kernels/scratch.h" namespace dali { constexpr int Dims = 3; using InputType = float; using OutputType = float; class SliceBenchGPU : public DALIBenchmark { public: kernels::TestTensorList<InputType, Dims> test_data; kernels::TestTensorList<OutputType, Dims> out_data; void Setup(const TensorShape<Dims> &in_shape, const TensorShape<Dims> &out_shape, int batch_size = 1) { test_data.reshape(uniform_list_shape<Dims>(batch_size, in_shape)); InputType num = 0; auto seq_gen = [&num]() { return num++; }; Fill(test_data.cpu(), seq_gen); out_data.reshape(uniform_list_shape<Dims>(batch_size, out_shape)); } void RunGPU(benchmark::State& st) { int H = st.range(0); int W = st.range(1); int C = st.range(2); int anchor_h = st.range(3); int anchor_w = st.range(4); int anchor_c = st.range(5); int crop_h = st.range(6); int crop_w = st.range(7); int crop_c = st.range(8); int batch_size = st.range(9); TensorShape<Dims> in_shape{H, W, C}; TensorShape<Dims> anchor{anchor_h, anchor_w, anchor_c}; TensorShape<Dims> out_shape{crop_h, crop_w, crop_c}; Setup(in_shape, out_shape, batch_size); using Kernel = kernels::SliceGPU<OutputType, InputType, Dims>; Kernel kernel; std::vector<kernels::SliceArgs<OutputType, Dims>> args_vec(batch_size); for (auto &args : args_vec) { args.anchor = anchor; args.shape = out_shape; } auto out_tv = out_data.gpu(); auto in_tv = test_data.gpu(); for (auto _ : st) { kernels::KernelContext ctx; ctx.gpu.stream = 0; auto req = kernel.Setup(ctx, in_tv, args_vec); kernels::ScratchpadAllocator scratch_alloc; scratch_alloc.Reserve(req.scratch_sizes); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; kernel.Run(ctx, out_tv, in_tv, args_vec); CUDA_CALL(hipStreamSynchronize(ctx.gpu.stream)); st.counters["FPS"] = benchmark::Counter(st.iterations() + 1, benchmark::Counter::kIsRate); } } }; static void SliceKernelArgs_GPU_OnlySlice(benchmark::internal::Benchmark *b) { for (int H = 1000; H >= 500; H /= 2) { int W = H, C = 3; int crop_h = 9 * H / 10; int crop_w = 9 * W / 10; b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 1}); b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 10}); } } BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlySlice)(benchmark::State& st) { this->RunGPU(st); } BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlySlice)->Iterations(1000) ->Unit(benchmark::kMicrosecond) ->UseRealTime() ->Apply(SliceKernelArgs_GPU_OnlySlice); static void SliceKernelArgs_GPU_OnlyPad(benchmark::internal::Benchmark *b) { for (int H = 1000; H >= 500; H /= 2) { int W = H, C = 3; int crop_h = 9 * H / 10; int crop_w = 9 * W / 10; int anchor_h = H; int anchor_w = W; b->Args({H, W, C, anchor_h, anchor_w, 0, 
crop_h, crop_w, C, 1}); b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 10}); } } BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlyPad)(benchmark::State& st) { this->RunGPU(st); } BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlyPad)->Iterations(1000) ->Unit(benchmark::kMicrosecond) ->UseRealTime() ->Apply(SliceKernelArgs_GPU_OnlyPad); static void SliceKernelArgs_GPU_SliceAndPad(benchmark::internal::Benchmark *b) { for (int H = 1000; H >= 500; H /= 2) { int W = H, C = 3; int crop_h = 9 * H / 10; int crop_w = 9 * W / 10; int anchor_h = H / 2; int anchor_w = W / 2; b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 1}); b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 10}); } } BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_SliceAndPad)(benchmark::State& st) { this->RunGPU(st); } BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_SliceAndPad)->Iterations(1000) ->Unit(benchmark::kMicrosecond) ->UseRealTime() ->Apply(SliceKernelArgs_GPU_SliceAndPad); } // namespace dali
0bfd59ffab1d9211e88669a61bb7d10c775006b3.cu
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <benchmark/benchmark.h> #include <vector> #include "dali/benchmark/dali_bench.h" #include "dali/kernels/slice/slice_gpu.cuh" #include "dali/test/tensor_test_utils.h" #include "dali/test/test_tensors.h" #include "dali/kernels/scratch.h" namespace dali { constexpr int Dims = 3; using InputType = float; using OutputType = float; class SliceBenchGPU : public DALIBenchmark { public: kernels::TestTensorList<InputType, Dims> test_data; kernels::TestTensorList<OutputType, Dims> out_data; void Setup(const TensorShape<Dims> &in_shape, const TensorShape<Dims> &out_shape, int batch_size = 1) { test_data.reshape(uniform_list_shape<Dims>(batch_size, in_shape)); InputType num = 0; auto seq_gen = [&num]() { return num++; }; Fill(test_data.cpu(), seq_gen); out_data.reshape(uniform_list_shape<Dims>(batch_size, out_shape)); } void RunGPU(benchmark::State& st) { int H = st.range(0); int W = st.range(1); int C = st.range(2); int anchor_h = st.range(3); int anchor_w = st.range(4); int anchor_c = st.range(5); int crop_h = st.range(6); int crop_w = st.range(7); int crop_c = st.range(8); int batch_size = st.range(9); TensorShape<Dims> in_shape{H, W, C}; TensorShape<Dims> anchor{anchor_h, anchor_w, anchor_c}; TensorShape<Dims> out_shape{crop_h, crop_w, crop_c}; Setup(in_shape, out_shape, batch_size); using Kernel = kernels::SliceGPU<OutputType, InputType, Dims>; Kernel kernel; std::vector<kernels::SliceArgs<OutputType, Dims>> args_vec(batch_size); for (auto &args : args_vec) { args.anchor = anchor; args.shape = out_shape; } auto out_tv = out_data.gpu(); auto in_tv = test_data.gpu(); for (auto _ : st) { kernels::KernelContext ctx; ctx.gpu.stream = 0; auto req = kernel.Setup(ctx, in_tv, args_vec); kernels::ScratchpadAllocator scratch_alloc; scratch_alloc.Reserve(req.scratch_sizes); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; kernel.Run(ctx, out_tv, in_tv, args_vec); CUDA_CALL(cudaStreamSynchronize(ctx.gpu.stream)); st.counters["FPS"] = benchmark::Counter(st.iterations() + 1, benchmark::Counter::kIsRate); } } }; static void SliceKernelArgs_GPU_OnlySlice(benchmark::internal::Benchmark *b) { for (int H = 1000; H >= 500; H /= 2) { int W = H, C = 3; int crop_h = 9 * H / 10; int crop_w = 9 * W / 10; b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 1}); b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 10}); } } BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlySlice)(benchmark::State& st) { this->RunGPU(st); } BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlySlice)->Iterations(1000) ->Unit(benchmark::kMicrosecond) ->UseRealTime() ->Apply(SliceKernelArgs_GPU_OnlySlice); static void SliceKernelArgs_GPU_OnlyPad(benchmark::internal::Benchmark *b) { for (int H = 1000; H >= 500; H /= 2) { int W = H, C = 3; int crop_h = 9 * H / 10; int crop_w = 9 * W / 10; int anchor_h = H; int anchor_w = W; b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 1}); b->Args({H, W, C, anchor_h, 
anchor_w, 0, crop_h, crop_w, C, 10}); } } BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlyPad)(benchmark::State& st) { this->RunGPU(st); } BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlyPad)->Iterations(1000) ->Unit(benchmark::kMicrosecond) ->UseRealTime() ->Apply(SliceKernelArgs_GPU_OnlyPad); static void SliceKernelArgs_GPU_SliceAndPad(benchmark::internal::Benchmark *b) { for (int H = 1000; H >= 500; H /= 2) { int W = H, C = 3; int crop_h = 9 * H / 10; int crop_w = 9 * W / 10; int anchor_h = H / 2; int anchor_w = W / 2; b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 1}); b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 10}); } } BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_SliceAndPad)(benchmark::State& st) { this->RunGPU(st); } BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_SliceAndPad)->Iterations(1000) ->Unit(benchmark::kMicrosecond) ->UseRealTime() ->Apply(SliceKernelArgs_GPU_SliceAndPad); } // namespace dali
55eb522bc7c21b3cf7c6ec0a0ef4f248c52d4e44.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>   /* for EXIT_SUCCESS */

#define N 256

// One GPU thread computes the full N x N matrix-vector product A = B * C serially.
__global__ void matrix_vector_multi_gpu_1_1(float *A_d, float *B_d, float *C_d)
{
    int i,j;
    for(j=0;j<N;j++){
        A_d[j] = 0.0F;
        for(i=0;i<N;i++) {
            A_d[j] = A_d[j]+B_d[j*N+i]*C_d[i];
        }
    }
}

// Shared-memory variant: C is staged into shared memory, then each thread
// computes one element of A (row threadIdx.x of B dotted with C).
__global__ void matrix_vector_multi_gpu_1_1_sh(float *A_d, float *B_d, float *C_d)
{
    int i;
    __shared__ float tmp_c[N];

    tmp_c[threadIdx.x] = C_d[threadIdx.x];
    __syncthreads();

    A_d[threadIdx.x] = 0.0F;
    for(i=0;i<N;i++){
        A_d[threadIdx.x] = A_d[threadIdx.x] + B_d[threadIdx.x*N+i]*tmp_c[i];
    }
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    unsigned int t, travdirtime;

    int i,j;
    float A[N], B[N*N], C[N];
    float *A_d, *B_d, *C_d;
    dim3 blocks(1,1,1);
    dim3 threads(1,1,1);

    for(j=0;j<N;j++) {
        for(i=0;i<N;i++) {
            B[j*N+i] = ((float)j)/256.0;
        }
    }

    for(j=0;j<N;j++)
        C[j] = 1.0F;

    hipMalloc((void **)&A_d, N*sizeof(float));
    hipMalloc((void **)&B_d, N*N*sizeof(float));
    hipMalloc((void **)&C_d, N*sizeof(float));

    hipMemcpy(A_d, A, N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(B_d, B, N*N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(C_d, C, N*sizeof(float), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( matrix_vector_multi_gpu_1_1), dim3(blocks), dim3(threads), 0, 0, A_d, B_d, C_d);
    hipLaunchKernelGGL(( matrix_vector_multi_gpu_1_1_sh), dim3(blocks), dim3(threads), 0, 0, A_d, B_d, C_d);

    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);

    return EXIT_SUCCESS;
}
55eb522bc7c21b3cf7c6ec0a0ef4f248c52d4e44.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>   /* for EXIT_SUCCESS */

#define N 256

// One GPU thread computes the full N x N matrix-vector product A = B * C serially.
__global__ void matrix_vector_multi_gpu_1_1(float *A_d, float *B_d, float *C_d)
{
    int i,j;
    for(j=0;j<N;j++){
        A_d[j] = 0.0F;
        for(i=0;i<N;i++) {
            A_d[j] = A_d[j]+B_d[j*N+i]*C_d[i];
        }
    }
}

// Shared-memory variant: C is staged into shared memory, then each thread
// computes one element of A (row threadIdx.x of B dotted with C).
__global__ void matrix_vector_multi_gpu_1_1_sh(float *A_d, float *B_d, float *C_d)
{
    int i;
    __shared__ float tmp_c[N];

    tmp_c[threadIdx.x] = C_d[threadIdx.x];
    __syncthreads();

    A_d[threadIdx.x] = 0.0F;
    for(i=0;i<N;i++){
        A_d[threadIdx.x] = A_d[threadIdx.x] + B_d[threadIdx.x*N+i]*tmp_c[i];
    }
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    unsigned int t, travdirtime;

    int i,j;
    float A[N], B[N*N], C[N];
    float *A_d, *B_d, *C_d;
    dim3 blocks(1,1,1);
    dim3 threads(1,1,1);

    for(j=0;j<N;j++) {
        for(i=0;i<N;i++) {
            B[j*N+i] = ((float)j)/256.0;
        }
    }

    for(j=0;j<N;j++)
        C[j] = 1.0F;

    cudaMalloc((void **)&A_d, N*sizeof(float));
    cudaMalloc((void **)&B_d, N*N*sizeof(float));
    cudaMalloc((void **)&C_d, N*sizeof(float));

    cudaMemcpy(A_d, A, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, N*sizeof(float), cudaMemcpyHostToDevice);

    matrix_vector_multi_gpu_1_1<<<blocks, threads>>>(A_d, B_d, C_d);
    matrix_vector_multi_gpu_1_1_sh<<<blocks, threads>>>(A_d, B_d, C_d);

    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);

    return EXIT_SUCCESS;
}
cc0bbf7357793e30aa65387faed8de223113f43a.hip
// !!! This is a file automatically generated by hipify!!!
#include "opencvcuda.h"
// opencv resize would require opencv_cudawarping, which contains resize templates for
// almost every type; the generated code is too large and is not needed here, so this
// is split out separately.

using namespace cv;
using namespace cv::cuda;
using namespace cv::cuda::device;

#define BLOCK_X 32
#define BLOCK_Y 8

const dim3 block = dim3(BLOCK_X, BLOCK_Y);

template <typename T>
void resize_gpu(PtrStepSz<T> source, PtrStepSz<T> dest, bool bLinear, hipStream_t stream) {
    float fx = static_cast<float>(source.cols) / dest.cols;
    float fy = static_cast<float>(source.rows) / dest.rows;
    dim3 grid(divUp(dest.cols, block.x), divUp(dest.rows, block.y));
    if (bLinear) {
        resize_linear<T> << <grid, block, 0, stream >> > (source, dest, fx, fy);
    }
    else {
        resize_nearest<T> << <grid, block, 0, stream >> > (source, dest, fx, fy);
    }
}

// Instantiate a few of the templates explicitly
template void resize_gpu<uchar4>(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool bLinear, hipStream_t stream);
template void resize_gpu<uchar>(PtrStepSz<uchar> source, PtrStepSz<uchar> dest, bool bLinear, hipStream_t stream);
template void resize_gpu<float4>(PtrStepSz<float4> source, PtrStepSz<float4> dest, bool bLinear, hipStream_t stream);

//void resize_gpuf(PtrStepSz<float4> source, PtrStepSz<float4> dest, bool bLinear, hipStream_t stream) {
//	float fx = static_cast<float>(source.cols) / dest.cols;
//	float fy = static_cast<float>(source.rows) / dest.rows;
//	dim3 grid(divUp(dest.cols, block.x), divUp(dest.rows, block.y));
//	if (bLinear) {
//		resize_linear<float4> << <grid, block, 0, stream >> > (source, dest, fx, fy);
//	}
//	else {
//		resize_nearest<float4> << <grid, block, 0, stream >> > (source, dest, fx, fy);
//	}
//}
cc0bbf7357793e30aa65387faed8de223113f43a.cu
#include "opencvcuda.h" //opncv resize需要引入 opencv_cudawarping,包含几乎各种类型的resize模板,生成出来太大,在这不需要,单独拉出来 using namespace cv; using namespace cv::cuda; using namespace cv::cuda::device; #define BLOCK_X 32 #define BLOCK_Y 8 const dim3 block = dim3(BLOCK_X, BLOCK_Y); template <typename T> void resize_gpu(PtrStepSz<T> source, PtrStepSz<T> dest, bool bLinear, cudaStream_t stream) { float fx = static_cast<float>(source.cols) / dest.cols; float fy = static_cast<float>(source.rows) / dest.rows; dim3 grid(divUp(dest.cols, block.x), divUp(dest.rows, block.y)); if (bLinear) { resize_linear<T> << <grid, block, 0, stream >> > (source, dest, fx, fy); } else { resize_nearest<T> << <grid, block, 0, stream >> > (source, dest, fx, fy); } } //实例化几个 template void resize_gpu<uchar4>(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool bLinear, cudaStream_t stream); template void resize_gpu<uchar>(PtrStepSz<uchar> source, PtrStepSz<uchar> dest, bool bLinear, cudaStream_t stream); template void resize_gpu<float4>(PtrStepSz<float4> source, PtrStepSz<float4> dest, bool bLinear, cudaStream_t stream); //void resize_gpuf(PtrStepSz<float4> source, PtrStepSz<float4> dest, bool bLinear, cudaStream_t stream) { // float fx = static_cast<float>(source.cols) / dest.cols; // float fy = static_cast<float>(source.rows) / dest.rows; // dim3 grid(divUp(dest.cols, block.x), divUp(dest.rows, block.y)); // if (bLinear) { // resize_linear<float4> << <grid, block, 0, stream >> > (source, dest, fx, fy); // } // else { // resize_nearest<float4> << <grid, block, 0, stream >> > (source, dest, fx, fy); // } //}
ef7b95c31640f3507bb9d89902cbab9344fbbe40.hip
// !!! This is a file automatically generated by hipify!!! /* * L3.cu * * Created on: Mar 12, 2015 * Author: mvc */ #include "L3.h" L3::L3() { // TODO Auto-generated constructor stub } L3::~L3() { // TODO Auto-generated destructor stub } #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <errno.h> #include <unistd.h> #include <dirent.h> #include <sys/time.h> #include "hip/hip_runtime.h" #include <hip/hip_runtime.h> using namespace std; // Define constant parameters #define cfa_size 4 #define num_filter 5 #define patch_size 9 #define border_size 4 #define image_width 720 #define image_height 1280 #define voltage_max 0.9734 #define lum_list_size 20 #define num_out 3 #define low 0.95 #define high 1.15 #define sat_levels 19 #define num_frames 100 /* Cuda function - L3Render Compute mean for each channel Inputs: out_image - pre-allocated space for output (xyz) image image - original image cfa - cfa pattern, should be 0-indexed lum_list - luminance list sat_list - saturation list flat_filters - filters for flat regions texture_filters - filters for texture regions */ __global__ void L3Render(float* const out_image, float const * const image, float const * const lum_list, float const * const sat_list, float const * const flat_filters, float const * const texture_filters, float const * const flat_threshold_list) { // Find pixel position const int row = blockIdx.x; const int col = threadIdx.x; const size_t out_pixel_index = num_out*(row + col * image_height); // Check pixel range if ((row < border_size) || (row >= image_height - border_size) || (col < border_size) || (col >= image_width - border_size)){ return; } // Compute patch type const unsigned short patch_type[] = {row % cfa_size, col % cfa_size}; // patch type const unsigned short cfa[] = {1, 0, 1, 3, 4, 1, 2, 1, 1, 3, 1, 0, 2, 1, 4, 1}; // cfa pattern // Store patch data in image to local memory float patch_data[patch_size*patch_size]; // Compute mean for each channel float channel_mean[num_filter] = {0.0}; unsigned short channel_count[num_filter] = {0}; unsigned short cfa_index[patch_size * patch_size]; unsigned short index = 0; unsigned short col_index = (col - border_size) % cfa_size; unsigned short row_index = (row - border_size) % cfa_size; size_t pixel_index = (row - border_size) + (col - border_size)*image_height; bool is_sat[num_filter] = {false}; for (short jj = -border_size; jj <= border_size; jj++){ unsigned short j_index = col_index * cfa_size; unsigned short i_index = row_index; for (short ii = -border_size; ii <= border_size; ii++){ cfa_index[index] = i_index + j_index; channel_count[cfa[cfa_index[index]]] += 1; patch_data[index] = image[pixel_index]; channel_mean[cfa[cfa_index[index]]] += patch_data[index]; is_sat[cfa[cfa_index[index]]] |= (patch_data[index] > voltage_max); index++; pixel_index++; i_index = (i_index + 1) % cfa_size; } pixel_index += image_height-2*border_size-1; col_index = (col_index + 1) % cfa_size; } // Compute channel mean luminance // Channel mean luminance is defined as the mean of channel_mean float lum_mean = 0; for (int ii = 0; ii < num_filter; ii++) { channel_mean[ii] /= channel_count[ii]; lum_mean += channel_mean[ii]; } lum_mean /= num_filter; // Convert luminance to luminance index // Binary search might be faster for large nubmer of luminance levels // But this difference can be ignored when we have only around 20 luminance levels unsigned short lum_index = lum_list_size - 1; for (int ii = 0; ii < lum_list_size; ii++) { if (lum_mean < lum_list[ii]) { lum_index = ii; break; } } // 
Compute saturation type unsigned short sat_type = 0; // sat_type is the encoded saturation type unsigned short sat_index; // sat_index is the number found with sat_list // const unsigned short sat_list_size = (1 << num_filter); for (int ii = num_filter - 1; ii >= 0; ii --) sat_type = (sat_type << 1) + is_sat[ii]; // (channel_mean[ii] > voltage_max); const float *cur_sat_list = sat_list + ((patch_type[1] * cfa_size + patch_type[0]) << num_filter); sat_index = cur_sat_list[sat_type]; // Find nearest sat_type for missing ones const unsigned short sat_list_size = (1 << num_filter); if (sat_index == 0){ float min_cost = 10000; // Init min cost to some arbitrarily large value for (int ii = 0; ii < sat_list_size; ii++) { if (cur_sat_list[ii] != 0){ // compute cost float cur_cost = 0; unsigned short sat_diff = (ii ^ sat_type); for (int jj = 0; sat_diff > 0; jj++) { if ((sat_diff & 1) > 0) cur_cost += fabsf(channel_mean[jj] - voltage_max); sat_diff = (sat_diff >> 1); } if (cur_cost < min_cost) { min_cost = cur_cost; sat_index = cur_sat_list[ii]; } } } } sat_index--; // make sat_index 0-indexed // Compute image contrast // Assume image_contrast array has been allocated as zeros float image_contrast = 0; for (index = 0; index < patch_size * patch_size; index++) image_contrast += fabsf(patch_data[index] - channel_mean[cfa[cfa_index[index]]]); image_contrast /= (patch_size * patch_size); // Determine flat or texture const int threshold_index = ((sat_index * lum_list_size + lum_index) * cfa_size + patch_type[1]) * cfa_size + patch_type[0]; const float flat_threshold = flat_threshold_list[threshold_index]; // Apply filter to patch const float *filter; float out_data[num_out] = {0}; unsigned short filter_index; unsigned int filter_offset = threshold_index * num_out * patch_size * patch_size; if (image_contrast < flat_threshold * low) { // flat region filter = flat_filters + filter_offset; for (index = 0, filter_index = 0; index < patch_size * patch_size; index++, filter_index += 3){ out_data[0] += patch_data[index] * filter[filter_index]; out_data[1] += patch_data[index] * filter[filter_index+1]; out_data[2] += patch_data[index] * filter[filter_index+2]; } } else if (image_contrast > flat_threshold * high) { // texture region filter = texture_filters + filter_offset; for (index = 0, filter_index = 0; index < patch_size * patch_size; index++, filter_index += 3){ out_data[0] += patch_data[index] * filter[filter_index]; out_data[1] += patch_data[index] * filter[filter_index+1]; out_data[2] += patch_data[index] * filter[filter_index+2]; } } else { // transition region const float weights = (image_contrast / flat_threshold - low) / (high - low); filter = flat_filters + filter_offset; const float* filter_texture = texture_filters + filter_offset; for (index = 0, filter_index = 0; index < patch_size * patch_size; index ++, filter_index += 3){ out_data[0] += patch_data[index] * (filter[filter_index] * weights + filter_texture[filter_index] * (1 - weights)); out_data[1] += patch_data[index] * (filter[filter_index+1] * weights + filter_texture[filter_index+1] * (1 - weights)); out_data[2] += patch_data[index] * (filter[filter_index+2] * weights + filter_texture[filter_index+2] * (1 - weights)); } } out_image[out_pixel_index] = out_data[0]; out_image[out_pixel_index + 1] = out_data[1]; out_image[out_pixel_index + 2] = out_data[2]; } // Main routine int L3::L3_main(void) { LOGD("hello"); // Init parameters float * out_image, * out_image_d; // pointer to rendered image float * image, * image_d; // pointer to input raw 
image data float * lum_list, * lum_list_d; // pointer to luminance list float * sat_list, * sat_list_d; // pointer to saturation list float * flat_filters, * flat_filters_d; // pointer to filters in flat region float * texture_filters, * texture_filters_d; // pointer to filters in texture region float * flat_threshold_list, * flat_threshold_list_d; // the list of thresholds of determining a patch is flat or not const unsigned short sat_list_size = (1 << num_filter)*cfa_size*cfa_size; const unsigned int flat_filters_size = num_out * patch_size * patch_size * lum_list_size * cfa_size * cfa_size * sat_levels; const unsigned int texture_filters_size = flat_filters_size; const unsigned int flat_threshold_list_size = lum_list_size*cfa_size*cfa_size*sat_levels; LOGD("hello 1"); // Allocate spaces in main memory image = (float*)malloc(sizeof(float)*image_width*image_height); out_image = (float*) malloc(sizeof(float)*image_height*image_width*num_out); lum_list = (float*) malloc(sizeof(float)*lum_list_size); sat_list = (float*) malloc(sizeof(float)*sat_list_size); flat_filters = (float*) malloc(sizeof(float) * flat_filters_size); texture_filters = (float*)malloc(sizeof(float)* texture_filters_size); flat_threshold_list = (float*)malloc(sizeof(float)*flat_threshold_list_size); LOGD("hello 2"); // Load data from files FILE* pf; pf = fopen("/sdcard/cudadata/lum_list.dat", "rb"); // luminance list LOGD("hello 3"); fread(lum_list, sizeof(float), lum_list_size, pf); LOGD("hello 4"); fclose(pf); pf = fopen("/sdcard/cudadata/sat_list.dat", "rb"); // saturation list fread(sat_list, sizeof(float), sat_list_size, pf); fclose(pf); pf = fopen("/sdcard/cudadata/flat_filters.dat", "rb"); //flat filters fread(flat_filters, sizeof(float), flat_filters_size, pf); fclose(pf); pf = fopen("/sdcard/cudadata/texture_filters.dat", "rb"); // texture filters fread(texture_filters, sizeof(float), texture_filters_size, pf); fclose(pf); pf = fopen("/sdcard/cudadata/flat_threshold_list.dat", "rb"); // flat threshold list fread(flat_threshold_list, sizeof(float), flat_threshold_list_size, pf); fclose(pf); // Allocate spaces in GPU hipMalloc((void **) & out_image_d, sizeof(float)*image_width*image_height*num_out); hipMalloc((void **) & image_d, sizeof(float)*image_height * image_width); hipMalloc((void **) & lum_list_d, sizeof(float)*lum_list_size); hipMalloc((void **) & sat_list_d, sizeof(float)*sat_list_size); hipMalloc((void **) & flat_filters_d, sizeof(float)*flat_filters_size); hipMalloc((void **) & texture_filters_d, sizeof(float)*texture_filters_size); hipMalloc((void **) & flat_threshold_list_d, sizeof(float)*flat_threshold_list_size); // Copy data to GPU hipMemcpy(lum_list_d, lum_list, sizeof(float)*lum_list_size, hipMemcpyHostToDevice); hipMemcpy(sat_list_d, sat_list, sizeof(float)*sat_list_size, hipMemcpyHostToDevice); hipMemcpy(flat_filters_d, flat_filters, sizeof(float)*flat_filters_size, hipMemcpyHostToDevice); hipMemcpy(texture_filters_d, texture_filters, sizeof(float)*texture_filters_size, hipMemcpyHostToDevice); hipMemcpy(flat_threshold_list_d, flat_threshold_list, sizeof(float)*flat_threshold_list_size, hipMemcpyHostToDevice); char *fName = new char[100]; struct timeval tm1, tm2; for (int fIndex = 0; fIndex < num_frames; fIndex ++ ){ //Runtime including IO //gettimeofday(&tm1, NULL); //LOGD("frame # %d", fIndex); // show debug info printf("Processing frame %d...\n", fIndex); // Load image // sprintf(fName, "./video/output_%07d.dat", fIndex); sprintf(fName, "/sdcard/cudadata/raw_image.dat", fIndex); pf = 
fopen(fName, "rb"); // image raw data fread(image, sizeof(float), image_width * image_height, pf); fclose(pf); hipMemcpy(image_d, image, sizeof(float)*image_height*image_width, hipMemcpyHostToDevice); hipMemset(out_image_d, 0, image_width*image_height*num_out*sizeof(float)); gettimeofday(&tm1, NULL); // Do computation in GPU hipLaunchKernelGGL(( L3Render), dim3(image_height), dim3(image_width), 0, 0, out_image_d, image_d, lum_list_d, sat_list_d, flat_filters_d, texture_filters_d, flat_threshold_list_d); // Copy back to main memory hipMemcpy(out_image, out_image_d, sizeof(float)*image_width*image_height*num_out, hipMemcpyDeviceToHost); gettimeofday(&tm2, NULL); unsigned long long t = 1000000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec); LOGD("time lapse: %llu", t); // Write rendered image to file // sprintf(fName, "./video_out/frame%07d.dat", fIndex); sprintf(fName, "/sdcard/cudadata/raw_image.dat", fIndex); pf = fopen(fName, "wb"); fwrite(out_image, sizeof(float), image_height * image_width * num_out, pf); fclose(pf); //runtime including IO /*gettimeofday(&tm2, NULL); unsigned long long t = 1000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec) / 1000; LOGD("time lapse: %llu", t);*/ } LOGD("done"); // Cleanup and return free(out_image); hipFree(out_image_d); free(image); hipFree(image_d); free(lum_list); hipFree(lum_list_d); free(sat_list); hipFree(sat_list_d); free(flat_filters); hipFree(flat_filters_d); free(texture_filters); hipFree(texture_filters_d); free(flat_threshold_list); hipFree(flat_threshold_list_d); LOGD("done 4"); //hipDeviceReset(); LOGD("blah 4"); return 0; }
ef7b95c31640f3507bb9d89902cbab9344fbbe40.cu
/* * L3.cu * * Created on: Mar 12, 2015 * Author: mvc */ #include "L3.h" L3::L3() { // TODO Auto-generated constructor stub } L3::~L3() { // TODO Auto-generated destructor stub } #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <errno.h> #include <unistd.h> #include <dirent.h> #include <sys/time.h> #include "cuda.h" #include <cuda_runtime.h> using namespace std; // Define constant parameters #define cfa_size 4 #define num_filter 5 #define patch_size 9 #define border_size 4 #define image_width 720 #define image_height 1280 #define voltage_max 0.9734 #define lum_list_size 20 #define num_out 3 #define low 0.95 #define high 1.15 #define sat_levels 19 #define num_frames 100 /* Cuda function - L3Render Compute mean for each channel Inputs: out_image - pre-allocated space for output (xyz) image image - original image cfa - cfa pattern, should be 0-indexed lum_list - luminance list sat_list - saturation list flat_filters - filters for flat regions texture_filters - filters for texture regions */ __global__ void L3Render(float* const out_image, float const * const image, float const * const lum_list, float const * const sat_list, float const * const flat_filters, float const * const texture_filters, float const * const flat_threshold_list) { // Find pixel position const int row = blockIdx.x; const int col = threadIdx.x; const size_t out_pixel_index = num_out*(row + col * image_height); // Check pixel range if ((row < border_size) || (row >= image_height - border_size) || (col < border_size) || (col >= image_width - border_size)){ return; } // Compute patch type const unsigned short patch_type[] = {row % cfa_size, col % cfa_size}; // patch type const unsigned short cfa[] = {1, 0, 1, 3, 4, 1, 2, 1, 1, 3, 1, 0, 2, 1, 4, 1}; // cfa pattern // Store patch data in image to local memory float patch_data[patch_size*patch_size]; // Compute mean for each channel float channel_mean[num_filter] = {0.0}; unsigned short channel_count[num_filter] = {0}; unsigned short cfa_index[patch_size * patch_size]; unsigned short index = 0; unsigned short col_index = (col - border_size) % cfa_size; unsigned short row_index = (row - border_size) % cfa_size; size_t pixel_index = (row - border_size) + (col - border_size)*image_height; bool is_sat[num_filter] = {false}; for (short jj = -border_size; jj <= border_size; jj++){ unsigned short j_index = col_index * cfa_size; unsigned short i_index = row_index; for (short ii = -border_size; ii <= border_size; ii++){ cfa_index[index] = i_index + j_index; channel_count[cfa[cfa_index[index]]] += 1; patch_data[index] = image[pixel_index]; channel_mean[cfa[cfa_index[index]]] += patch_data[index]; is_sat[cfa[cfa_index[index]]] |= (patch_data[index] > voltage_max); index++; pixel_index++; i_index = (i_index + 1) % cfa_size; } pixel_index += image_height-2*border_size-1; col_index = (col_index + 1) % cfa_size; } // Compute channel mean luminance // Channel mean luminance is defined as the mean of channel_mean float lum_mean = 0; for (int ii = 0; ii < num_filter; ii++) { channel_mean[ii] /= channel_count[ii]; lum_mean += channel_mean[ii]; } lum_mean /= num_filter; // Convert luminance to luminance index // Binary search might be faster for large nubmer of luminance levels // But this difference can be ignored when we have only around 20 luminance levels unsigned short lum_index = lum_list_size - 1; for (int ii = 0; ii < lum_list_size; ii++) { if (lum_mean < lum_list[ii]) { lum_index = ii; break; } } // Compute saturation type unsigned short sat_type = 0; // sat_type is the 
encoded saturation type unsigned short sat_index; // sat_index is the number found with sat_list // const unsigned short sat_list_size = (1 << num_filter); for (int ii = num_filter - 1; ii >= 0; ii --) sat_type = (sat_type << 1) + is_sat[ii]; // (channel_mean[ii] > voltage_max); const float *cur_sat_list = sat_list + ((patch_type[1] * cfa_size + patch_type[0]) << num_filter); sat_index = cur_sat_list[sat_type]; // Find nearest sat_type for missing ones const unsigned short sat_list_size = (1 << num_filter); if (sat_index == 0){ float min_cost = 10000; // Init min cost to some arbitrarily large value for (int ii = 0; ii < sat_list_size; ii++) { if (cur_sat_list[ii] != 0){ // compute cost float cur_cost = 0; unsigned short sat_diff = (ii ^ sat_type); for (int jj = 0; sat_diff > 0; jj++) { if ((sat_diff & 1) > 0) cur_cost += fabsf(channel_mean[jj] - voltage_max); sat_diff = (sat_diff >> 1); } if (cur_cost < min_cost) { min_cost = cur_cost; sat_index = cur_sat_list[ii]; } } } } sat_index--; // make sat_index 0-indexed // Compute image contrast // Assume image_contrast array has been allocated as zeros float image_contrast = 0; for (index = 0; index < patch_size * patch_size; index++) image_contrast += fabsf(patch_data[index] - channel_mean[cfa[cfa_index[index]]]); image_contrast /= (patch_size * patch_size); // Determine flat or texture const int threshold_index = ((sat_index * lum_list_size + lum_index) * cfa_size + patch_type[1]) * cfa_size + patch_type[0]; const float flat_threshold = flat_threshold_list[threshold_index]; // Apply filter to patch const float *filter; float out_data[num_out] = {0}; unsigned short filter_index; unsigned int filter_offset = threshold_index * num_out * patch_size * patch_size; if (image_contrast < flat_threshold * low) { // flat region filter = flat_filters + filter_offset; for (index = 0, filter_index = 0; index < patch_size * patch_size; index++, filter_index += 3){ out_data[0] += patch_data[index] * filter[filter_index]; out_data[1] += patch_data[index] * filter[filter_index+1]; out_data[2] += patch_data[index] * filter[filter_index+2]; } } else if (image_contrast > flat_threshold * high) { // texture region filter = texture_filters + filter_offset; for (index = 0, filter_index = 0; index < patch_size * patch_size; index++, filter_index += 3){ out_data[0] += patch_data[index] * filter[filter_index]; out_data[1] += patch_data[index] * filter[filter_index+1]; out_data[2] += patch_data[index] * filter[filter_index+2]; } } else { // transition region const float weights = (image_contrast / flat_threshold - low) / (high - low); filter = flat_filters + filter_offset; const float* filter_texture = texture_filters + filter_offset; for (index = 0, filter_index = 0; index < patch_size * patch_size; index ++, filter_index += 3){ out_data[0] += patch_data[index] * (filter[filter_index] * weights + filter_texture[filter_index] * (1 - weights)); out_data[1] += patch_data[index] * (filter[filter_index+1] * weights + filter_texture[filter_index+1] * (1 - weights)); out_data[2] += patch_data[index] * (filter[filter_index+2] * weights + filter_texture[filter_index+2] * (1 - weights)); } } out_image[out_pixel_index] = out_data[0]; out_image[out_pixel_index + 1] = out_data[1]; out_image[out_pixel_index + 2] = out_data[2]; } // Main routine int L3::L3_main(void) { LOGD("hello"); // Init parameters float * out_image, * out_image_d; // pointer to rendered image float * image, * image_d; // pointer to input raw image data float * lum_list, * lum_list_d; // pointer to luminance list 
float * sat_list, * sat_list_d; // pointer to saturation list float * flat_filters, * flat_filters_d; // pointer to filters in flat region float * texture_filters, * texture_filters_d; // pointer to filters in texture region float * flat_threshold_list, * flat_threshold_list_d; // the list of thresholds of determining a patch is flat or not const unsigned short sat_list_size = (1 << num_filter)*cfa_size*cfa_size; const unsigned int flat_filters_size = num_out * patch_size * patch_size * lum_list_size * cfa_size * cfa_size * sat_levels; const unsigned int texture_filters_size = flat_filters_size; const unsigned int flat_threshold_list_size = lum_list_size*cfa_size*cfa_size*sat_levels; LOGD("hello 1"); // Allocate spaces in main memory image = (float*)malloc(sizeof(float)*image_width*image_height); out_image = (float*) malloc(sizeof(float)*image_height*image_width*num_out); lum_list = (float*) malloc(sizeof(float)*lum_list_size); sat_list = (float*) malloc(sizeof(float)*sat_list_size); flat_filters = (float*) malloc(sizeof(float) * flat_filters_size); texture_filters = (float*)malloc(sizeof(float)* texture_filters_size); flat_threshold_list = (float*)malloc(sizeof(float)*flat_threshold_list_size); LOGD("hello 2"); // Load data from files FILE* pf; pf = fopen("/sdcard/cudadata/lum_list.dat", "rb"); // luminance list LOGD("hello 3"); fread(lum_list, sizeof(float), lum_list_size, pf); LOGD("hello 4"); fclose(pf); pf = fopen("/sdcard/cudadata/sat_list.dat", "rb"); // saturation list fread(sat_list, sizeof(float), sat_list_size, pf); fclose(pf); pf = fopen("/sdcard/cudadata/flat_filters.dat", "rb"); //flat filters fread(flat_filters, sizeof(float), flat_filters_size, pf); fclose(pf); pf = fopen("/sdcard/cudadata/texture_filters.dat", "rb"); // texture filters fread(texture_filters, sizeof(float), texture_filters_size, pf); fclose(pf); pf = fopen("/sdcard/cudadata/flat_threshold_list.dat", "rb"); // flat threshold list fread(flat_threshold_list, sizeof(float), flat_threshold_list_size, pf); fclose(pf); // Allocate spaces in GPU cudaMalloc((void **) & out_image_d, sizeof(float)*image_width*image_height*num_out); cudaMalloc((void **) & image_d, sizeof(float)*image_height * image_width); cudaMalloc((void **) & lum_list_d, sizeof(float)*lum_list_size); cudaMalloc((void **) & sat_list_d, sizeof(float)*sat_list_size); cudaMalloc((void **) & flat_filters_d, sizeof(float)*flat_filters_size); cudaMalloc((void **) & texture_filters_d, sizeof(float)*texture_filters_size); cudaMalloc((void **) & flat_threshold_list_d, sizeof(float)*flat_threshold_list_size); // Copy data to GPU cudaMemcpy(lum_list_d, lum_list, sizeof(float)*lum_list_size, cudaMemcpyHostToDevice); cudaMemcpy(sat_list_d, sat_list, sizeof(float)*sat_list_size, cudaMemcpyHostToDevice); cudaMemcpy(flat_filters_d, flat_filters, sizeof(float)*flat_filters_size, cudaMemcpyHostToDevice); cudaMemcpy(texture_filters_d, texture_filters, sizeof(float)*texture_filters_size, cudaMemcpyHostToDevice); cudaMemcpy(flat_threshold_list_d, flat_threshold_list, sizeof(float)*flat_threshold_list_size, cudaMemcpyHostToDevice); char *fName = new char[100]; struct timeval tm1, tm2; for (int fIndex = 0; fIndex < num_frames; fIndex ++ ){ //Runtime including IO //gettimeofday(&tm1, NULL); //LOGD("frame # %d", fIndex); // show debug info printf("Processing frame %d...\n", fIndex); // Load image // sprintf(fName, "./video/output_%07d.dat", fIndex); sprintf(fName, "/sdcard/cudadata/raw_image.dat", fIndex); pf = fopen(fName, "rb"); // image raw data fread(image, 
sizeof(float), image_width * image_height, pf); fclose(pf); cudaMemcpy(image_d, image, sizeof(float)*image_height*image_width, cudaMemcpyHostToDevice); cudaMemset(out_image_d, 0, image_width*image_height*num_out*sizeof(float)); gettimeofday(&tm1, NULL); // Do computation in GPU L3Render<<<image_height, image_width>>>(out_image_d, image_d, lum_list_d, sat_list_d, flat_filters_d, texture_filters_d, flat_threshold_list_d); // Copy back to main memory cudaMemcpy(out_image, out_image_d, sizeof(float)*image_width*image_height*num_out, cudaMemcpyDeviceToHost); gettimeofday(&tm2, NULL); unsigned long long t = 1000000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec); LOGD("time lapse: %llu", t); // Write rendered image to file // sprintf(fName, "./video_out/frame%07d.dat", fIndex); sprintf(fName, "/sdcard/cudadata/raw_image.dat", fIndex); pf = fopen(fName, "wb"); fwrite(out_image, sizeof(float), image_height * image_width * num_out, pf); fclose(pf); //runtime including IO /*gettimeofday(&tm2, NULL); unsigned long long t = 1000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec) / 1000; LOGD("time lapse: %llu", t);*/ } LOGD("done"); // Cleanup and return free(out_image); cudaFree(out_image_d); free(image); cudaFree(image_d); free(lum_list); cudaFree(lum_list_d); free(sat_list); cudaFree(sat_list_d); free(flat_filters); cudaFree(flat_filters_d); free(texture_filters); cudaFree(texture_filters_d); free(flat_threshold_list); cudaFree(flat_threshold_list_d); LOGD("done 4"); //cudaDeviceReset(); LOGD("blah 4"); return 0; }
984043382dcc46931a65bbc5bc12217c4af7a7d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include "histogram.h" #include "utils.h" #include <vector> #include <random> #include <chrono> // #include <PAPIProf.h> #include <omp.h> #include <string> #include <algorithm> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> using namespace std; // Shared memory atomics histogram __global__ void histogram(const double * input, int * output, const double cut_left, const double cut_right, const int bins, const int n) //const double inv_bin_width) { const double inv_bin_width = bins / (cut_right - cut_left); extern __shared__ unsigned int sh_hist[]; for (size_t i = threadIdx.x; i < bins; i += blockDim.x) { sh_hist[i] = 0; } for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < bins; i += blockDim.x * gridDim.x) { output[i] = 0; } __syncthreads(); for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if ((input[i] < cut_left) || (input[i] > cut_right)) continue; const int ffbin = (int) ((input[i] - cut_left) * inv_bin_width); atomicAdd( &sh_hist[ffbin], 1); } __syncthreads(); for (size_t i = threadIdx.x; i < bins; i += blockDim.x) { atomicAdd( &(output[i]), sh_hist[i] ); } } int main(int argc, char const *argv[]) { int n_turns = 50000; int n_particles = 1000000; int n_slices = 1000; int blocks = 512; int threads = 1024; if (argc > 1) n_turns = atoi(argv[1]); if (argc > 2) n_particles = atoi(argv[2]); if (argc > 3) n_slices = atoi(argv[3]); if (argc > 4) blocks = atoi(argv[4]); if (argc > 5) threads = atoi(argv[5]); // setup random engine default_random_engine gen; uniform_real_distribution<double> d(0.0, 1.0); // initialize variables vector<double> dt, dE; vector<int> profile; string input = HOME "/input_files/distribution_10M_particles.txt"; read_distribution(input, n_particles, dt, dE); double cut_left, cut_right; profile.resize(n_slices); cut_left = 1.05 * (*min_element(dt.begin(), dt.end())); cut_right = 0.95 * (*max_element(dt.begin(), dt.end())); if (cut_left > cut_right) swap(cut_left, cut_right); thrust::device_vector<double> dev_dt = dt; thrust::device_vector<int> dev_profile = profile; auto start = chrono::high_resolution_clock::now(); // main loop for (int i = 0; i < n_turns; ++i) { // thrust::fill(dev_profile.begin(), dev_profile.end(), 0); hipLaunchKernelGGL(( histogram) , dim3(blocks), dim3(threads), n_slices * sizeof(int), 0, thrust::raw_pointer_cast(dev_dt.data()), thrust::raw_pointer_cast(dev_profile.data()), cut_left, cut_right, n_slices, n_particles); hipDeviceSynchronize(); } auto end = chrono::high_resolution_clock::now(); thrust::copy(dev_profile.begin(), dev_profile.end(), profile.begin()); auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count(); printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n"); printf("histogram_gpu_v8\ttime(ms)\t%d\t0\t1\n", duration); printf("profile: %d\n", accumulate(profile.begin(), profile.end(), 0) / n_slices); return 0; }
984043382dcc46931a65bbc5bc12217c4af7a7d6.cu
#include <stdlib.h> #include <stdio.h> #include "histogram.h" #include "utils.h" #include <vector> #include <random> #include <chrono> // #include <PAPIProf.h> #include <omp.h> #include <string> #include <algorithm> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> using namespace std; // Shared memory atomics histogram __global__ void histogram(const double * input, int * output, const double cut_left, const double cut_right, const int bins, const int n) //const double inv_bin_width) { const double inv_bin_width = bins / (cut_right - cut_left); extern __shared__ unsigned int sh_hist[]; for (size_t i = threadIdx.x; i < bins; i += blockDim.x) { sh_hist[i] = 0; } for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < bins; i += blockDim.x * gridDim.x) { output[i] = 0; } __syncthreads(); for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if ((input[i] < cut_left) || (input[i] > cut_right)) continue; const int ffbin = (int) ((input[i] - cut_left) * inv_bin_width); atomicAdd( &sh_hist[ffbin], 1); } __syncthreads(); for (size_t i = threadIdx.x; i < bins; i += blockDim.x) { atomicAdd( &(output[i]), sh_hist[i] ); } } int main(int argc, char const *argv[]) { int n_turns = 50000; int n_particles = 1000000; int n_slices = 1000; int blocks = 512; int threads = 1024; if (argc > 1) n_turns = atoi(argv[1]); if (argc > 2) n_particles = atoi(argv[2]); if (argc > 3) n_slices = atoi(argv[3]); if (argc > 4) blocks = atoi(argv[4]); if (argc > 5) threads = atoi(argv[5]); // setup random engine default_random_engine gen; uniform_real_distribution<double> d(0.0, 1.0); // initialize variables vector<double> dt, dE; vector<int> profile; string input = HOME "/input_files/distribution_10M_particles.txt"; read_distribution(input, n_particles, dt, dE); double cut_left, cut_right; profile.resize(n_slices); cut_left = 1.05 * (*min_element(dt.begin(), dt.end())); cut_right = 0.95 * (*max_element(dt.begin(), dt.end())); if (cut_left > cut_right) swap(cut_left, cut_right); thrust::device_vector<double> dev_dt = dt; thrust::device_vector<int> dev_profile = profile; auto start = chrono::high_resolution_clock::now(); // main loop for (int i = 0; i < n_turns; ++i) { // thrust::fill(dev_profile.begin(), dev_profile.end(), 0); histogram <<< blocks, threads, n_slices * sizeof(int)>>>( thrust::raw_pointer_cast(dev_dt.data()), thrust::raw_pointer_cast(dev_profile.data()), cut_left, cut_right, n_slices, n_particles); cudaThreadSynchronize(); } auto end = chrono::high_resolution_clock::now(); thrust::copy(dev_profile.begin(), dev_profile.end(), profile.begin()); auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count(); printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n"); printf("histogram_gpu_v8\ttime(ms)\t%d\t0\t1\n", duration); printf("profile: %d\n", accumulate(profile.begin(), profile.end(), 0) / n_slices); return 0; }
9619bcb78009ba6e41e8d2ef0e4b7b7dfb3bce11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #define PI 3.1415926535897932384626433832795029f #define PIx2 6.2831853071795864769252867665590058f #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define K_ELEMS_PER_GRID 2048 #define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512 #define KERNEL_Q_THREADS_PER_BLOCK 256 #define KERNEL_Q_K_ELEMS_PER_GRID 1024 #define CUDA_ERRCK \ {hipError_t err; \ if ((err = hipGetLastError()) != hipSuccess) { \ fprintf(stderr, "CUDA error on line %d: %s\n", __LINE__, hipGetErrorString(err)); \ exit(-1); \ } \ } struct kValues { float Kx; float Ky; float Kz; float PhiMag; }; /* Values in the k-space coordinate system are stored in constant memory * on the GPU */ __constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID]; __global__ void ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) { int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x; if (indexK < numK) { float real = phiR[indexK]; float imag = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } __global__ void ComputeQ_GPU(int numK, int kGlobalIndex, float* x, float* y, float* z, float* Qr , float* Qi) { float sX; float sY; float sZ; float sQr; float sQi; // Determine the element of the X arrays computed by this thread int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x; // Read block's X values from global mem to shared mem sX = x[xIndex]; sY = y[xIndex]; sZ = z[xIndex]; sQr = Qr[xIndex]; sQi = Qi[xIndex]; // Loop over all elements of K in constant mem to compute a partial value // for X. 
int kIndex = 0; if (numK % 2) { float expArg = PIx2 * (ck[0].Kx * sX + ck[0].Ky * sY + ck[0].Kz * sZ); sQr += ck[0].PhiMag * cos(expArg); sQi += ck[0].PhiMag * sin(expArg); kIndex++; kGlobalIndex++; } for (; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK); kIndex += 2, kGlobalIndex += 2) { float expArg = PIx2 * (ck[kIndex].Kx * sX + ck[kIndex].Ky * sY + ck[kIndex].Kz * sZ); sQr += ck[kIndex].PhiMag * cos(expArg); sQi += ck[kIndex].PhiMag * sin(expArg); int kIndex1 = kIndex + 1; float expArg1 = PIx2 * (ck[kIndex1].Kx * sX + ck[kIndex1].Ky * sY + ck[kIndex1].Kz * sZ); sQr += ck[kIndex1].PhiMag * cos(expArg1); sQi += ck[kIndex1].PhiMag * sin(expArg1); } Qr[xIndex] = sQr; Qi[xIndex] = sQi; } void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d) { int phiMagBlocks = numK / KERNEL_PHI_MAG_THREADS_PER_BLOCK; if (numK % KERNEL_PHI_MAG_THREADS_PER_BLOCK) phiMagBlocks++; dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1); dim3 DimPhiMagGrid(phiMagBlocks, 1); hipLaunchKernelGGL(( ComputePhiMag_GPU) , dim3(DimPhiMagGrid), dim3(DimPhiMagBlock) , 0, 0, phiR_d, phiI_d, phiMag_d, numK); } void computeQ_GPU(int numK, int numX, float* x_d, float* y_d, float* z_d, kValues* kVals, float* Qr_d, float* Qi_d) { int QGrids = numK / KERNEL_Q_K_ELEMS_PER_GRID; if (numK % KERNEL_Q_K_ELEMS_PER_GRID) QGrids++; int QBlocks = numX / KERNEL_Q_THREADS_PER_BLOCK; if (numX % KERNEL_Q_THREADS_PER_BLOCK) QBlocks++; dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1); dim3 DimQGrid(QBlocks, 1); for (int QGrid = 0; QGrid < QGrids; QGrid++) { // Put the tile of K values into constant mem int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID; kValues* kValsTile = kVals + QGridBase; int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase); hipMemcpyToSymbol(ck, kValsTile, numElems * sizeof(kValues), 0); for(int iter = 0; iter < 30000; iter++){ hipLaunchKernelGGL(( ComputeQ_GPU) , dim3(DimQGrid), dim3(DimQBlock) , 0, 0, numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d); } } } void createDataStructsCPU(int numK, int numX, float** phiMag, float** Qr, float** Qi) { *phiMag = (float* ) memalign(16, numK * sizeof(float)); *Qr = (float*) memalign(16, numX * sizeof (float)); *Qi = (float*) memalign(16, numX * sizeof (float)); }
9619bcb78009ba6e41e8d2ef0e4b7b7dfb3bce11.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #define PI 3.1415926535897932384626433832795029f #define PIx2 6.2831853071795864769252867665590058f #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define K_ELEMS_PER_GRID 2048 #define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512 #define KERNEL_Q_THREADS_PER_BLOCK 256 #define KERNEL_Q_K_ELEMS_PER_GRID 1024 #define CUDA_ERRCK \ {cudaError_t err; \ if ((err = cudaGetLastError()) != cudaSuccess) { \ fprintf(stderr, "CUDA error on line %d: %s\n", __LINE__, cudaGetErrorString(err)); \ exit(-1); \ } \ } struct kValues { float Kx; float Ky; float Kz; float PhiMag; }; /* Values in the k-space coordinate system are stored in constant memory * on the GPU */ __constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID]; __global__ void ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) { int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x; if (indexK < numK) { float real = phiR[indexK]; float imag = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } __global__ void ComputeQ_GPU(int numK, int kGlobalIndex, float* x, float* y, float* z, float* Qr , float* Qi) { float sX; float sY; float sZ; float sQr; float sQi; // Determine the element of the X arrays computed by this thread int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x; // Read block's X values from global mem to shared mem sX = x[xIndex]; sY = y[xIndex]; sZ = z[xIndex]; sQr = Qr[xIndex]; sQi = Qi[xIndex]; // Loop over all elements of K in constant mem to compute a partial value // for X. int kIndex = 0; if (numK % 2) { float expArg = PIx2 * (ck[0].Kx * sX + ck[0].Ky * sY + ck[0].Kz * sZ); sQr += ck[0].PhiMag * cos(expArg); sQi += ck[0].PhiMag * sin(expArg); kIndex++; kGlobalIndex++; } for (; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK); kIndex += 2, kGlobalIndex += 2) { float expArg = PIx2 * (ck[kIndex].Kx * sX + ck[kIndex].Ky * sY + ck[kIndex].Kz * sZ); sQr += ck[kIndex].PhiMag * cos(expArg); sQi += ck[kIndex].PhiMag * sin(expArg); int kIndex1 = kIndex + 1; float expArg1 = PIx2 * (ck[kIndex1].Kx * sX + ck[kIndex1].Ky * sY + ck[kIndex1].Kz * sZ); sQr += ck[kIndex1].PhiMag * cos(expArg1); sQi += ck[kIndex1].PhiMag * sin(expArg1); } Qr[xIndex] = sQr; Qi[xIndex] = sQi; } void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d) { int phiMagBlocks = numK / KERNEL_PHI_MAG_THREADS_PER_BLOCK; if (numK % KERNEL_PHI_MAG_THREADS_PER_BLOCK) phiMagBlocks++; dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1); dim3 DimPhiMagGrid(phiMagBlocks, 1); ComputePhiMag_GPU <<< DimPhiMagGrid, DimPhiMagBlock >>> (phiR_d, phiI_d, phiMag_d, numK); } void computeQ_GPU(int numK, int numX, float* x_d, float* y_d, float* z_d, kValues* kVals, float* Qr_d, float* Qi_d) { int QGrids = numK / KERNEL_Q_K_ELEMS_PER_GRID; if (numK % KERNEL_Q_K_ELEMS_PER_GRID) QGrids++; int QBlocks = numX / KERNEL_Q_THREADS_PER_BLOCK; if (numX % KERNEL_Q_THREADS_PER_BLOCK) QBlocks++; dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1); dim3 DimQGrid(QBlocks, 1); for (int QGrid = 0; QGrid < QGrids; QGrid++) { // Put the tile of K values into constant mem int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID; kValues* kValsTile = kVals + QGridBase; int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase); cudaMemcpyToSymbol(ck, kValsTile, numElems * 
sizeof(kValues), 0); for(int iter = 0; iter < 30000; iter++){ ComputeQ_GPU <<< DimQGrid, DimQBlock >>> (numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d); } } } void createDataStructsCPU(int numK, int numX, float** phiMag, float** Qr, float** Qi) { *phiMag = (float* ) memalign(16, numK * sizeof(float)); *Qr = (float*) memalign(16, numX * sizeof (float)); *Qi = (float*) memalign(16, numX * sizeof (float)); }
13c00f70c3ec7064fc148137409f9ddec98b9148.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the Cuda OpenGL bindings to dynamically modify a vertex buffer using a Cuda kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with Cuda 3. Map the VBO for writing from Cuda 4. Run Cuda kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL Host code */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // OpenGL Graphics includes #include <helper_gl.h> // #include <GL/glew.h> // gl buffers & interop #include <GL/freeglut.h> // includes, cuda #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #define REFRESH_DELAY 10 //ms //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // vbo variables GLuint vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; float g_fAnim = 0.0; // StopWatchInterface *timer = NULL; // Auto-Verification Code int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; bool g_bQAReadback = false; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward bool runTest(); void cleanup(); // GL functionality bool initGL(int *argc, char **argv); void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res); // rendering callbacks void display(); void timerEvent(int value); // Cuda functionality void runCuda(struct cudaGraphicsResource **vbo_resource); const char *sSDKsample = "simpleGL (VBO)"; /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! 
@param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; // write output vertex pos[y*width+x] = make_float4(u, w, v, 1.0f); } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); hipLaunchKernelGGL(( simple_vbo_kernel), dim3(grid), dim3(block), 0, 0, pos, mesh_width, mesh_height, time); } #define checkCudaErrors(ans) { _gpuAssert((ans), __FILE__, __LINE__); } inline void _gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert [%s (%d)]: %s\n", file, line, hipGetErrorString(code)); if (abort) exit(code); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); printf("\n"); // First initialize OpenGL context, so we can properly set the GL for CUDA. // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop. if(!initGL(&argc, argv)) return 1; runTest(); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { // avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; // fpsLimit = (int)MAX(avgFPS, 1.f); // sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS); glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL(int *argc, char **argv) { glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Cuda GL Interop (VBO)"); glutDisplayFunc(display); glutCloseFunc(cleanup); // glutTimerFunc(REFRESH_DELAY, timerEvent,0); // default initialization glClearColor(0.0, 0.0, 0.0, 1.0); glDisable(GL_DEPTH_TEST); // viewport glViewport(0, 0, window_width, window_height); // projection glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); SDK_CHECK_ERROR_GL(); return true; } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest() { // Create the CUTIL timer // sdkCreateTimer(&timer); int devID; hipGetDevice(&devID); // create VBO createVBO(&vbo, &cuda_vbo_resource, hipGraphicsMapFlagsWriteDiscard); // run the cuda part runCuda(&cuda_vbo_resource); // start rendering mainloop glutMainLoop(); return true; } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { // map OpenGL buffer object for writing from CUDA float4 *dptr; checkCudaErrors(hipGraphicsMapResources(1, vbo_resource, 0)); size_t num_bytes; checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource)); //printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes); // execute the kernel // dim3 block(8, 8, 1); // dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); // kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim); launch_kernel(dptr, mesh_width, mesh_height, g_fAnim); // unmap buffer object checkCudaErrors(hipGraphicsUnmapResources(1, vbo_resource, 0)); } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); // create buffer object glGenBuffers(1, vbo); glBindBuffer(GL_ARRAY_BUFFER, *vbo); // initialize buffer object unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); SDK_CHECK_ERROR_GL(); } //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { // unregister this buffer object with CUDA checkCudaErrors(hipGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display() { // sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor3f(1.0, 0.0, 0.0); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); g_fAnim += 0.01f; // sdkStopTimer(&timer); glutPostRedisplay(); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } void cleanup() { // sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); } }
13c00f70c3ec7064fc148137409f9ddec98b9148.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the Cuda OpenGL bindings to dynamically modify a vertex buffer using a Cuda kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with Cuda 3. Map the VBO for writing from Cuda 4. Run Cuda kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL Host code */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // OpenGL Graphics includes #include <helper_gl.h> // #include <GL/glew.h> // gl buffers & interop #include <GL/freeglut.h> // includes, cuda #include <cuda_runtime.h> #include <cuda_gl_interop.h> #define REFRESH_DELAY 10 //ms //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // vbo variables GLuint vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; float g_fAnim = 0.0; // StopWatchInterface *timer = NULL; // Auto-Verification Code int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; bool g_bQAReadback = false; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward bool runTest(); void cleanup(); // GL functionality bool initGL(int *argc, char **argv); void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res); // rendering callbacks void display(); void timerEvent(int value); // Cuda functionality void runCuda(struct cudaGraphicsResource **vbo_resource); const char *sSDKsample = "simpleGL (VBO)"; /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! 
@param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; // write output vertex pos[y*width+x] = make_float4(u, w, v, 1.0f); } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); simple_vbo_kernel<<< grid, block>>>(pos, mesh_width, mesh_height, time); } #define checkCudaErrors(ans) { _gpuAssert((ans), __FILE__, __LINE__); } inline void _gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert [%s (%d)]: %s\n", file, line, cudaGetErrorString(code)); if (abort) exit(code); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); printf("\n"); // First initialize OpenGL context, so we can properly set the GL for CUDA. // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop. if(!initGL(&argc, argv)) return 1; runTest(); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { // avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; // fpsLimit = (int)MAX(avgFPS, 1.f); // sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS); glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL(int *argc, char **argv) { glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Cuda GL Interop (VBO)"); glutDisplayFunc(display); glutCloseFunc(cleanup); // glutTimerFunc(REFRESH_DELAY, timerEvent,0); // default initialization glClearColor(0.0, 0.0, 0.0, 1.0); glDisable(GL_DEPTH_TEST); // viewport glViewport(0, 0, window_width, window_height); // projection glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); SDK_CHECK_ERROR_GL(); return true; } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest() { // Create the CUTIL timer // sdkCreateTimer(&timer); int devID; cudaGetDevice(&devID); // create VBO createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard); // run the cuda part runCuda(&cuda_vbo_resource); // start rendering mainloop glutMainLoop(); return true; } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { // map OpenGL buffer object for writing from CUDA float4 *dptr; checkCudaErrors(cudaGraphicsMapResources(1, vbo_resource, 0)); size_t num_bytes; checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource)); //printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes); // execute the kernel // dim3 block(8, 8, 1); // dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); // kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim); launch_kernel(dptr, mesh_width, mesh_height, g_fAnim); // unmap buffer object checkCudaErrors(cudaGraphicsUnmapResources(1, vbo_resource, 0)); } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); // create buffer object glGenBuffers(1, vbo); glBindBuffer(GL_ARRAY_BUFFER, *vbo); // initialize buffer object unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); SDK_CHECK_ERROR_GL(); } //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { // unregister this buffer object with CUDA checkCudaErrors(cudaGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display() { // sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor3f(1.0, 0.0, 0.0); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); g_fAnim += 0.01f; // sdkStopTimer(&timer); glutPostRedisplay(); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } void cleanup() { // sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); } }
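////////////////////////////////////////////////////////////////////////////////
// NOTE: illustrative sketch only, not part of the original SDK sample.
// launch_kernel() above computes grid = mesh / block, which covers every vertex
// only because mesh_width and mesh_height (256) are exact multiples of the 8x8
// block. A size-agnostic variant rounds the grid up and bounds-checks inside
// the kernel (e.g. "if (x >= width || y >= height) return;"). The helper name
// below is hypothetical and nothing above calls it.
////////////////////////////////////////////////////////////////////////////////
static dim3 make_covering_grid(unsigned int w, unsigned int h, dim3 block)
{
    // ceil(w / block.x) x ceil(h / block.y) blocks cover the whole mesh
    return dim3((w + block.x - 1) / block.x,
                (h + block.y - 1) / block.y,
                1);
}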
a7e5e4a94e21acf7fa52bac4eb94b1d90f164445.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* part3.cu */

#include "part3_conf.h"
#include "part3_cpu.h"
#include "part3_kernel.cu"

/* ====================== Main =========================== */
int main(int argc, char* argv[]) {
    int *A, *A_device, *A_reverse_device, *A_aux;
    int block_number;

    A = (int*) malloc(N*sizeof(int));
    A_aux = (int*) malloc(N*sizeof(int));
    hipMalloc( (void **) &A_device, N*sizeof(int) );
    hipMalloc( (void **) &A_reverse_device, N*sizeof(int) );

    // Number of blocks to use:
    block_number = N/BLOCK_SIZE;
    if ( (N % BLOCK_SIZE) != 0 )
        block_number++;

    // Create A and store it directly in A_device:
    init_tab(A, N);
    //printf("A = [ "); display_tab(A, N);
    hipMemcpy( A_device, A, N*sizeof(int), hipMemcpyHostToDevice );

    // Reverse A on the CPU:
    reverse_array(A, N);
    //printf("A_inv = [ "); display_tab(A, N);

    // Reverse A on the GPU:
    hipLaunchKernelGGL(( _reverse_array), dim3(block_number), dim3(BLOCK_SIZE), 0, 0, A_device, N, A_reverse_device);
    hipMemcpy( A_aux, A_reverse_device, N*sizeof(int), hipMemcpyDeviceToHost );
    //printf("A_aux = [ "); display_tab(A_aux, N);

    // Check that the two arrays are equal:
    is_that_equal(A, A_aux, N);

    // Reverse A on the GPU, this time using shared memory:
    hipLaunchKernelGGL(( _reverse_array2), dim3(block_number), dim3(BLOCK_SIZE), 0, 0, A_device, N, A_reverse_device);
    hipMemcpy( A_aux, A_reverse_device, N*sizeof(int), hipMemcpyDeviceToHost );

    is_that_equal(A, A_aux, N);
}
a7e5e4a94e21acf7fa52bac4eb94b1d90f164445.cu
/* part3.cu */

#include "part3_conf.h"
#include "part3_cpu.h"
#include "part3_kernel.cu"

/* ====================== Main =========================== */
int main(int argc, char* argv[]) {
    int *A, *A_device, *A_reverse_device, *A_aux;
    int block_number;

    A = (int*) malloc(N*sizeof(int));
    A_aux = (int*) malloc(N*sizeof(int));
    cudaMalloc( (void **) &A_device, N*sizeof(int) );
    cudaMalloc( (void **) &A_reverse_device, N*sizeof(int) );

    // Number of blocks to use:
    block_number = N/BLOCK_SIZE;
    if ( (N % BLOCK_SIZE) != 0 )
        block_number++;

    // Create A and store it directly in A_device:
    init_tab(A, N);
    //printf("A = [ "); display_tab(A, N);
    cudaMemcpy( A_device, A, N*sizeof(int), cudaMemcpyHostToDevice );

    // Reverse A on the CPU:
    reverse_array(A, N);
    //printf("A_inv = [ "); display_tab(A, N);

    // Reverse A on the GPU:
    _reverse_array<<<block_number, BLOCK_SIZE>>>(A_device, N, A_reverse_device);
    cudaMemcpy( A_aux, A_reverse_device, N*sizeof(int), cudaMemcpyDeviceToHost );
    //printf("A_aux = [ "); display_tab(A_aux, N);

    // Check that the two arrays are equal:
    is_that_equal(A, A_aux, N);

    // Reverse A on the GPU, this time using shared memory:
    _reverse_array2<<<block_number, BLOCK_SIZE>>>(A_device, N, A_reverse_device);
    cudaMemcpy( A_aux, A_reverse_device, N*sizeof(int), cudaMemcpyDeviceToHost );

    is_that_equal(A, A_aux, N);
}
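/* ---------------------------------------------------------------------------
   NOTE: illustrative sketch only. The kernels _reverse_array and
   _reverse_array2 used above are defined in part3_kernel.cu, which is not
   reproduced in this listing. The two kernels below show what such a pair
   typically looks like (a plain global-memory reverse and a shared-memory
   variant); they are assumptions for illustration, not the actual contents
   of part3_kernel.cu, and nothing above calls them.
   --------------------------------------------------------------------------- */
__global__ void _reverse_array_sketch(const int *in, int n, int *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[n - 1 - i] = in[i];              // element i lands at its mirrored index
}

__global__ void _reverse_array2_sketch(const int *in, int n, int *out)
{
    __shared__ int tile[BLOCK_SIZE];         // BLOCK_SIZE comes from part3_conf.h
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i < n)
        tile[threadIdx.x] = in[i];           // coalesced load of this block's tile
    __syncthreads();

    // the element loaded by thread s must end up at out[n - 1 - (base + s)];
    // walking s backwards makes consecutive threads write consecutive outputs
    int s = blockDim.x - 1 - threadIdx.x;
    int g = blockIdx.x * blockDim.x + s;
    if (g < n)
        out[n - 1 - g] = tile[s];
}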
3e91258dd8ccfa18bee246a6967b70216eee2b03.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %run_test hipify "%s" "%t" %cuda_args
#include <iostream>

#define TOKEN_PASTE(X, Y) X ## Y
#define ARG_LIST_AS_MACRO a, device_x, device_y
// Variadic form so that the hipified launch is a complete expression at the
// point of definition; KERNEL_CALL_AS_MACRO(args...) expands to a full
// hipLaunchKernelGGL call.
#define KERNEL_CALL_AS_MACRO(...) hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, __VA_ARGS__)
#define KERNEL_NAME_MACRO axpy<float>

// CHECK: #define COMPLETE_LAUNCH hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y)
#define COMPLETE_LAUNCH hipLaunchKernelGGL(( axpy), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y)

template<typename T>
__global__ void axpy(T a, T *x, T *y) {
  // CHECK: y[hipThreadIdx_x] = a * x[hipThreadIdx_x];
  y[threadIdx.x] = a * x[threadIdx.x];
}

int main(int argc, char* argv[]) {
  const int kDataLen = 4;

  float a = 2.0f;
  float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
  float host_y[kDataLen];

  // Copy input data to device.
  float* device_x;
  float* device_y;
  // CHECK: hipMalloc(&device_x, kDataLen * sizeof(float));
  hipMalloc(&device_x, kDataLen * sizeof(float));
  // CHECK: hipMalloc(&device_y, kDataLen * sizeof(float));
  hipMalloc(&device_y, kDataLen * sizeof(float));
  // CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);

  // Launch the kernel in numerous different strange ways to exercise the preprocessor.
  // CHECK: hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  hipLaunchKernelGGL(( axpy), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, TOKEN_PASTE(device, _x), device_y);
  hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, TOKEN_PASTE(device, _x), device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  // CHECK: hipLaunchKernelGGL(KERNEL_NAME_MACRO, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  hipLaunchKernelGGL(( KERNEL_NAME_MACRO), dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  KERNEL_CALL_AS_MACRO(ARG_LIST_AS_MACRO);
  // CHECK: COMPLETE_LAUNCH;
  COMPLETE_LAUNCH;

  // Copy output data to host.
  // CHECK: hipDeviceSynchronize();
  hipDeviceSynchronize();
  // CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);
  hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);

  // Print the results.
  for (int i = 0; i < kDataLen; ++i) {
    std::cout << "y[" << i << "] = " << host_y[i] << "\n";
  }

  // CHECK: hipDeviceReset();
  hipDeviceReset();

  return 0;
}
3e91258dd8ccfa18bee246a6967b70216eee2b03.cu
// RUN: %run_test hipify "%s" "%t" %cuda_args #include <iostream> #define TOKEN_PASTE(X, Y) X ## Y #define ARG_LIST_AS_MACRO a, device_x, device_y #define KERNEL_CALL_AS_MACRO axpy<float><<<1, kDataLen>>> #define KERNEL_NAME_MACRO axpy<float> // CHECK: #define COMPLETE_LAUNCH hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y) #define COMPLETE_LAUNCH axpy<<<1, kDataLen>>>(a, device_x, device_y) template<typename T> __global__ void axpy(T a, T *x, T *y) { // CHECK: y[hipThreadIdx_x] = a * x[hipThreadIdx_x]; y[threadIdx.x] = a * x[threadIdx.x]; } int main(int argc, char* argv[]) { const int kDataLen = 4; float a = 2.0f; float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f}; float host_y[kDataLen]; // Copy input data to device. float* device_x; float* device_y; // CHECK: hipMalloc(&device_x, kDataLen * sizeof(float)); cudaMalloc(&device_x, kDataLen * sizeof(float)); // CHECK: hipMalloc(&device_y, kDataLen * sizeof(float)); cudaMalloc(&device_y, kDataLen * sizeof(float)); // CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice); cudaMemcpy(device_x, host_x, kDataLen * sizeof(float), cudaMemcpyHostToDevice); // Launch the kernel in numerous different strange ways to exercise the prerocessor. // CHECK: hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); axpy<<<1, kDataLen>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); axpy<float><<<1, kDataLen>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, TOKEN_PASTE(device, _x), device_y); axpy<float><<<1, kDataLen>>>(a, TOKEN_PASTE(device, _x), device_y); // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO); axpy<float><<<1, kDataLen>>>(ARG_LIST_AS_MACRO); // CHECK: hipLaunchKernelGGL(KERNEL_NAME_MACRO, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO); KERNEL_NAME_MACRO<<<1, kDataLen>>>(ARG_LIST_AS_MACRO); // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO); KERNEL_CALL_AS_MACRO(ARG_LIST_AS_MACRO); // CHECK: COMPLETE_LAUNCH; COMPLETE_LAUNCH; // Copy output data to host. // CHECK: hipDeviceSynchronize(); cudaDeviceSynchronize(); // CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost); cudaMemcpy(host_y, device_y, kDataLen * sizeof(float), cudaMemcpyDeviceToHost); // Print the results. for (int i = 0; i < kDataLen; ++i) { std::cout << "y[" << i << "] = " << host_y[i] << "\n"; } // CHECK: hipDeviceReset(); cudaDeviceReset(); return 0; }
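// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only, not part of the test above. A kernel launch
// whose <<<...>>> is split between a macro definition and its use site (the
// KERNEL_CALL_AS_MACRO case) is awkward to translate token-by-token, because
// the launch only becomes a complete expression after preprocessing. One way
// to keep such a macro self-contained is to make it variadic so the whole
// launch lives inside the definition; LAUNCH_AXPY is a hypothetical name and
// nothing above uses it.
// ---------------------------------------------------------------------------
#define LAUNCH_AXPY(grid, block, ...) \
    axpy<float><<<(grid), (block)>>>(__VA_ARGS__)
// usage: LAUNCH_AXPY(1, kDataLen, a, device_x, device_y);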
2b15b6f1093e6b80fb0147826dfa215c30fb0c41.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "selection_sort_gpu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int b = 2; int n = XSIZE*YSIZE; int m = 2; int k = 1; const float *dist = NULL; hipMalloc(&dist, XSIZE*YSIZE); int *outi = NULL; hipMalloc(&outi, XSIZE*YSIZE); float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( selection_sort_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,m,k,dist,outi,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( selection_sort_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,m,k,dist,outi,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( selection_sort_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,m,k,dist,outi,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2b15b6f1093e6b80fb0147826dfa215c30fb0c41.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "selection_sort_gpu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int b = 2; int n = XSIZE*YSIZE; int m = 2; int k = 1; const float *dist = NULL; cudaMalloc(&dist, XSIZE*YSIZE); int *outi = NULL; cudaMalloc(&outi, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); selection_sort_gpu<<<gridBlock,threadBlock>>>(b,n,m,k,dist,outi,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { selection_sort_gpu<<<gridBlock,threadBlock>>>(b,n,m,k,dist,outi,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { selection_sort_gpu<<<gridBlock,threadBlock>>>(b,n,m,k,dist,outi,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
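// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only, not part of the generated benchmark above.
// The loops above time the launches with std::chrono on the host, which also
// includes launch overhead. An alternative is to time on the device with CUDA
// events; the helper below shows that pattern for the same kernel and launch
// configuration (the helper name is hypothetical and nothing above calls it).
// ---------------------------------------------------------------------------
static float time_launches_ms(dim3 grid, dim3 block, int iters,
                              int b, int n, int m, int k,
                              const float *dist, int *outi, float *out)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int it = 0; it < iters; ++it)
        selection_sort_gpu<<<grid, block>>>(b, n, m, k, dist, outi, out);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);              // wait for the last launch to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // elapsed device time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}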
879d28ce4cfe500072233371b326cd18af55de82.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <iostream> #include <numeric> #include <array> #include <vector> #include <stdlib.h> #include <random> #include <thread> #include <thrust/reduce.h> #include <thrust/count.h> #include <thrust/remove.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/system/hip/execution_policy.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <thread> #include "cClipping.h" #include "cTimer.h" #include "thrust_rmm_allocator.h" cTimer timer; typedef rmm::device_vector<float>::iterator IterFloat; typedef rmm::device_vector<int>::iterator IterInt; // typedef thrust::device_vector<float>::iterator IterFloat; // typedef thrust::device_vector<int>::iterator IterInt; #define MB (1024*1024) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /** Clips the input vector given the input-defined plane @param posIn: input vector to be clipped normal: normal of the plane with which posIn will be clipped d: the point along with the nomral with which posIn will be clipped */ void clip (rmm::device_vector<float> *posIn, float *normal, float d) { plane_clippingPDBver2 clip (normal, d); strided_range<IterFloat> X ( posIn->begin() , posIn->end(), 4); strided_range<IterFloat> Y ( posIn->begin()+1, posIn->end(), 4); strided_range<IterFloat> Z ( posIn->begin()+2, posIn->end(), 4); strided_range<IterFloat> W ( posIn->begin()+3, posIn->end(), 4); size_t new_size = thrust::remove_if( thrust::make_zip_iterator ( thrust::make_tuple( X.begin(), Y.begin(), Z.begin(), W.begin() )), thrust::make_zip_iterator ( thrust::make_tuple( X.end(),Y.end(), Z.end(), W.end() )), clip ) - thrust::make_zip_iterator(thrust::make_tuple(X.begin(), Y.begin(), Z.begin(), W.begin() )); // Resize the input vector to appropriate size posIn->resize(new_size*4); } /** Copy vec to d_vec on the given thread @param devId: stream ID used to copy the vectors vec: vector to be copied from d_vec: vector to be copied to */ void memCopyHtoD (int devId, std::vector<float> *vec, rmm::device_vector<float> *d_vec ) { gpuErrchk(hipSetDevice(devId)); *d_vec = *vec; } /** Copy vec to d_vec on the given thread @param devId: stream ID used to copy the vectors vec: vector to be copied from d_vec: vector to be copied to */ void memCopyDtoH (int devId, std::vector<float> *vec, rmm::device_vector<float> *d_vec ) { gpuErrchk(hipSetDevice(devId)); vec->resize(d_vec->size()); thrust::copy(d_vec->begin(), d_vec->end(), vec->begin()); } /** Run clipping on given thread @param devId: stream ID used to run the clipping d_pos: vector to be clipped normal: normal of the plane with which d_pos will be clipped d: the point along with the nomral with which d_pos will be clipped */ void launch (int devId, rmm::device_vector<float> *d_pos, float *normal, float d) { gpuErrchk(hipSetDevice(devId)); clip (d_pos, normal, d); } int main(int argc, char *argv[]) { size_t sx, sy, sz; int numStreams, i, j; int deviceCount = 0; unsigned int iter = 0; double elapsed = 0.0; double totalElapsed = 0.0; double htodElapsed = 0.0; double dtohElapsed = 0.0; double transferElapsed = 0.0; double computeElapsed = 0.0; // This willl be used to generate plane's normals randomly // between -1 to 1 
std::mt19937 rng(time(NULL)); std::uniform_real_distribution<float> gen(-1.0, 1.0); // plane defined by normal and D float normal[3], d = 0.5f; if (argc < 5){ std::cout << "Usage: clipping x_size y_size z_size iterations" << std::endl; return 1; } sx = std::stoll (std::string(argv[1])); sy = std::stoll (std::string(argv[2])); sz = std::stoll (std::string(argv[3])); iter = std::stoi (std::string(argv[4])); size_t numParticles = sx*sy*sz; // Retrieve the number of streams/devices we can use gpuErrchk(hipGetDeviceCount(&deviceCount)); numStreams = deviceCount; size_t numParticlesPerThread = sx*sy*sz/numStreams; std::cout << "========\n"; std::cout << "Domain size is " << sx << " x " << sy << " x " << sz << " = " << numParticles << " particles" << std::endl; std::cout << "Size MB " << (sizeof(float) * numParticles * 4.0) / MB <<std::endl; std::cout << "Num. Devices " << deviceCount << std::endl; std::cout << "Particles per device " << numParticlesPerThread << std::endl; std::cout << "Size MB per device " << (sizeof(float) * numParticlesPerThread * 4.0) / MB <<std::endl; // Define array of vectors and threads to be used std::thread thread[numStreams]; std::vector <float> pos[numStreams]; std::vector <float> posOut[numStreams]; rmm::device_vector<float> d_pos[numStreams]; std::cout << "Generated particles...\n"; // Types of allocations: // CudaDefaultAllocation // PoolAllocation // CudaManagedMemory rmmOptions_t options{rmmAllocationMode_t::CudaManagedMemory, 0, true}; rmmInitialize(&options); // Timer to record time taken to initialize dataset timer.reset(); // Launch threads and join for (i=0;i<numStreams;i++) { size_t szMin = i*(sz/numStreams); size_t szMax = (i+1)*(sz/numStreams); thread[i] =std::thread (initDatasetChunk, &pos[i], sx, sy, szMin, szMax); } for(i = 0; i < numStreams; i++) { thread[i].join (); } std::cout << "in " << timer.getElapsedMilliseconds() << " ms\n"; for(j=0;j<iter;j++) { // Generating plane's normals randomly // between -1 to 1 normal[0] = gen(rng); normal[1] = gen(rng); normal[2] = gen(rng); // Copy H to D timer.reset (); for (i=0;i<numStreams;i++) { thread[i] =std::thread (memCopyHtoD, i, &pos[i], &d_pos[i]); } for(i = 0; i < numStreams; i++) { thread[i].join (); } elapsed = timer.getElapsedMilliseconds(); std::cout << "H to D: " << elapsed << " ms\n"; htodElapsed+=elapsed; transferElapsed+=elapsed; // launch the clip kernel timer.reset (); for(i = 0; i < numStreams; i++) { thread[i] =std::thread (launch, i, &d_pos[i], normal, d ); } for(i = 0; i < numStreams; i++) { thread[i].join (); } elapsed = timer.getElapsedMilliseconds(); std::cout << "Clipping: " << elapsed << " ms\n"; computeElapsed += elapsed; // Copy D to H timer.reset (); for (i=0;i<numStreams;i++) { thread[i] =std::thread (memCopyDtoH, i, &posOut[i], &d_pos[i]); } for(i = 0; i < numStreams; i++) { thread[i].join (); } elapsed = timer.getElapsedMilliseconds(); std::cout << "D to H: " << elapsed << " ms\n"; dtohElapsed+=elapsed; transferElapsed+=elapsed; } std::cout << "--------\n"; totalElapsed = computeElapsed + transferElapsed; std::cout << "H to D Avg time (ms) after " << iter << " iterations " << htodElapsed / iter << std::endl; std::cout << "D to H Avg time (ms) after " << iter << " iterations " << dtohElapsed / iter << std::endl; std::cout << "Transfers Avg time (ms) after " << iter << " iterations " << transferElapsed / iter << std::endl; std::cout << "Compute Avg time (ms) after " << iter << " iterations " << computeElapsed / iter << std::endl; std::cout << "Total Avg time (ms) after " << iter << 
" iterations " << totalElapsed / iter << std::endl; return 0; }
879d28ce4cfe500072233371b326cd18af55de82.cu
#include <algorithm> #include <iostream> #include <numeric> #include <array> #include <vector> #include <stdlib.h> #include <random> #include <thread> #include <thrust/reduce.h> #include <thrust/count.h> #include <thrust/remove.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/system/cuda/execution_policy.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <thread> #include "cClipping.h" #include "cTimer.h" #include "thrust_rmm_allocator.h" cTimer timer; typedef rmm::device_vector<float>::iterator IterFloat; typedef rmm::device_vector<int>::iterator IterInt; // typedef thrust::device_vector<float>::iterator IterFloat; // typedef thrust::device_vector<int>::iterator IterInt; #define MB (1024*1024) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /** Clips the input vector given the input-defined plane @param posIn: input vector to be clipped normal: normal of the plane with which posIn will be clipped d: the point along with the nomral with which posIn will be clipped */ void clip (rmm::device_vector<float> *posIn, float *normal, float d) { plane_clippingPDBver2 clip (normal, d); strided_range<IterFloat> X ( posIn->begin() , posIn->end(), 4); strided_range<IterFloat> Y ( posIn->begin()+1, posIn->end(), 4); strided_range<IterFloat> Z ( posIn->begin()+2, posIn->end(), 4); strided_range<IterFloat> W ( posIn->begin()+3, posIn->end(), 4); size_t new_size = thrust::remove_if( thrust::make_zip_iterator ( thrust::make_tuple( X.begin(), Y.begin(), Z.begin(), W.begin() )), thrust::make_zip_iterator ( thrust::make_tuple( X.end(),Y.end(), Z.end(), W.end() )), clip ) - thrust::make_zip_iterator(thrust::make_tuple(X.begin(), Y.begin(), Z.begin(), W.begin() )); // Resize the input vector to appropriate size posIn->resize(new_size*4); } /** Copy vec to d_vec on the given thread @param devId: stream ID used to copy the vectors vec: vector to be copied from d_vec: vector to be copied to */ void memCopyHtoD (int devId, std::vector<float> *vec, rmm::device_vector<float> *d_vec ) { gpuErrchk(cudaSetDevice(devId)); *d_vec = *vec; } /** Copy vec to d_vec on the given thread @param devId: stream ID used to copy the vectors vec: vector to be copied from d_vec: vector to be copied to */ void memCopyDtoH (int devId, std::vector<float> *vec, rmm::device_vector<float> *d_vec ) { gpuErrchk(cudaSetDevice(devId)); vec->resize(d_vec->size()); thrust::copy(d_vec->begin(), d_vec->end(), vec->begin()); } /** Run clipping on given thread @param devId: stream ID used to run the clipping d_pos: vector to be clipped normal: normal of the plane with which d_pos will be clipped d: the point along with the nomral with which d_pos will be clipped */ void launch (int devId, rmm::device_vector<float> *d_pos, float *normal, float d) { gpuErrchk(cudaSetDevice(devId)); clip (d_pos, normal, d); } int main(int argc, char *argv[]) { size_t sx, sy, sz; int numStreams, i, j; int deviceCount = 0; unsigned int iter = 0; double elapsed = 0.0; double totalElapsed = 0.0; double htodElapsed = 0.0; double dtohElapsed = 0.0; double transferElapsed = 0.0; double computeElapsed = 0.0; // This willl be used to generate plane's normals randomly // between -1 to 1 std::mt19937 rng(time(NULL)); 
std::uniform_real_distribution<float> gen(-1.0, 1.0); // plane defined by normal and D float normal[3], d = 0.5f; if (argc < 5){ std::cout << "Usage: clipping x_size y_size z_size iterations" << std::endl; return 1; } sx = std::stoll (std::string(argv[1])); sy = std::stoll (std::string(argv[2])); sz = std::stoll (std::string(argv[3])); iter = std::stoi (std::string(argv[4])); size_t numParticles = sx*sy*sz; // Retrieve the number of streams/devices we can use gpuErrchk(cudaGetDeviceCount(&deviceCount)); numStreams = deviceCount; size_t numParticlesPerThread = sx*sy*sz/numStreams; std::cout << "========\n"; std::cout << "Domain size is " << sx << " x " << sy << " x " << sz << " = " << numParticles << " particles" << std::endl; std::cout << "Size MB " << (sizeof(float) * numParticles * 4.0) / MB <<std::endl; std::cout << "Num. Devices " << deviceCount << std::endl; std::cout << "Particles per device " << numParticlesPerThread << std::endl; std::cout << "Size MB per device " << (sizeof(float) * numParticlesPerThread * 4.0) / MB <<std::endl; // Define array of vectors and threads to be used std::thread thread[numStreams]; std::vector <float> pos[numStreams]; std::vector <float> posOut[numStreams]; rmm::device_vector<float> d_pos[numStreams]; std::cout << "Generated particles...\n"; // Types of allocations: // CudaDefaultAllocation // PoolAllocation // CudaManagedMemory rmmOptions_t options{rmmAllocationMode_t::CudaManagedMemory, 0, true}; rmmInitialize(&options); // Timer to record time taken to initialize dataset timer.reset(); // Launch threads and join for (i=0;i<numStreams;i++) { size_t szMin = i*(sz/numStreams); size_t szMax = (i+1)*(sz/numStreams); thread[i] =std::thread (initDatasetChunk, &pos[i], sx, sy, szMin, szMax); } for(i = 0; i < numStreams; i++) { thread[i].join (); } std::cout << "in " << timer.getElapsedMilliseconds() << " ms\n"; for(j=0;j<iter;j++) { // Generating plane's normals randomly // between -1 to 1 normal[0] = gen(rng); normal[1] = gen(rng); normal[2] = gen(rng); // Copy H to D timer.reset (); for (i=0;i<numStreams;i++) { thread[i] =std::thread (memCopyHtoD, i, &pos[i], &d_pos[i]); } for(i = 0; i < numStreams; i++) { thread[i].join (); } elapsed = timer.getElapsedMilliseconds(); std::cout << "H to D: " << elapsed << " ms\n"; htodElapsed+=elapsed; transferElapsed+=elapsed; // launch the clip kernel timer.reset (); for(i = 0; i < numStreams; i++) { thread[i] =std::thread (launch, i, &d_pos[i], normal, d ); } for(i = 0; i < numStreams; i++) { thread[i].join (); } elapsed = timer.getElapsedMilliseconds(); std::cout << "Clipping: " << elapsed << " ms\n"; computeElapsed += elapsed; // Copy D to H timer.reset (); for (i=0;i<numStreams;i++) { thread[i] =std::thread (memCopyDtoH, i, &posOut[i], &d_pos[i]); } for(i = 0; i < numStreams; i++) { thread[i].join (); } elapsed = timer.getElapsedMilliseconds(); std::cout << "D to H: " << elapsed << " ms\n"; dtohElapsed+=elapsed; transferElapsed+=elapsed; } std::cout << "--------\n"; totalElapsed = computeElapsed + transferElapsed; std::cout << "H to D Avg time (ms) after " << iter << " iterations " << htodElapsed / iter << std::endl; std::cout << "D to H Avg time (ms) after " << iter << " iterations " << dtohElapsed / iter << std::endl; std::cout << "Transfers Avg time (ms) after " << iter << " iterations " << transferElapsed / iter << std::endl; std::cout << "Compute Avg time (ms) after " << iter << " iterations " << computeElapsed / iter << std::endl; std::cout << "Total Avg time (ms) after " << iter << " iterations " << 
totalElapsed / iter << std::endl; return 0; }
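// ---------------------------------------------------------------------------
// NOTE: illustrative sketch only. plane_clippingPDBver2 and strided_range are
// declared in cClipping.h, which is not reproduced in this listing. The
// functor below is a minimal sketch of what a remove_if predicate for the
// (x, y, z, w) zip_iterator used in clip() typically looks like, removing
// points on the far side of the plane dot(normal, p) > d. It is an assumption
// for illustration, not the actual header contents, and nothing above uses it.
// ---------------------------------------------------------------------------
struct plane_clipping_sketch
{
    float nx, ny, nz, d;

    plane_clipping_sketch(const float *normal, float dist)
        : nx(normal[0]), ny(normal[1]), nz(normal[2]), d(dist) {}

    template <typename Tuple>
    __host__ __device__ bool operator()(const Tuple &p) const
    {
        // tuple layout matches the zip of the X, Y, Z (and W) strided ranges
        float x = thrust::get<0>(p);
        float y = thrust::get<1>(p);
        float z = thrust::get<2>(p);
        return nx * x + ny * y + nz * z > d;   // true => remove this particle
    }
};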
9b5b1361c4401f3316abcbb5e7619706790fd3db.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA functions for ray-voxel intersection based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It usesthe so-called * Jacobs algorithm to compute efficiently the length of the x-rays over * voxel space. * * CODE by Ander Biguri * Sepideh Hatamikia (arbitrary rotation) * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "Siddon_projection.hpp" #include "TIGRE_common.hpp" #include <math.h> #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("Ax:Siddon_projection",hipGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 #define PROJ_PER_BLOCK 9 #define PIXEL_SIZE_BLOCK 9 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTexture(const GpuIds& gpuids,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool alloc); __constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device __global__ void vecAddInPlace(float *a, float *b, unsigned long n) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (idx < n) a[idx] = a[idx] + b[idx]; } __global__ void kernelPixelDetector( Geometry geo, float* detector, const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex){ unsigned long long u = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long v = blockIdx.y * blockDim.y + threadIdx.y; unsigned long long projNumber=threadIdx.z; if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK) return; #if IS_FOR_MATLAB_TIGRE size_t idx = (size_t)(u * (unsigned long long)geo.nDetecV + v)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ; #else size_t idx = (size_t)(v * (unsigned long long)geo.nDetecU + u)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ; #endif unsigned long indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array (for a given GPU) if(indAlpha>=totalNoOfProjections) return; Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaU = projParamsArrayDev[4*projNumber+1]; Point3D deltaV = projParamsArrayDev[4*projNumber+2]; Point3D source = projParamsArrayDev[4*projNumber+3]; /////// Get coordinates XYZ of pixel UV unsigned long pixelV = geo.nDetecV-v-1; unsigned long pixelU = u; Point3D pixel1D; pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); /////// // Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516 ////// // Also called Jacobs algorithms Point3D ray; // vector of Xray ray.x=pixel1D.x-source.x; ray.y=pixel1D.y-source.y; ray.z=pixel1D.z-source.z; float eps=0.001; ray.x=(fabsf(ray.x)<eps)? 0 : ray.x; ray.y=(fabsf(ray.y)<eps)? 0 : ray.y; ray.z=(fabsf(ray.z)<eps)? 0 : ray.z; // This variables are ommited because // bx,by,bz ={0,0,0} // dx,dy,dz ={1,1,1} // compute parameter values for x-ray parametric equation. 
eq(3-10) float axm,aym,azm; float axM,ayM,azM; // In the paper Nx= number of X planes-> Nvoxel+1 axm=fminf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x)); aym=fminf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y)); azm=fminf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z)); axM=fmaxf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x)); ayM=fmaxf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y)); azM=fmaxf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z)); float am=fmaxf(fmaxf(axm,aym),azm); float aM=fminf(fminf(axM,ayM),azM); // line intersects voxel space -> am<aM if (am>=aM) detector[idx]=0; // Compute max/min image INDEX for intersection eq(11-19) // Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version float imin,imax,jmin,jmax,kmin,kmax; // for X if( source.x<pixel1D.x){ imin=(am==axm)? 1.0f : ceilf (source.x+am*ray.x); imax=(aM==axM)? geo.nVoxelX : floorf(source.x+aM*ray.x); }else{ imax=(am==axm)? geo.nVoxelX-1.0f : floorf(source.x+am*ray.x); imin=(aM==axM)? 0.0f : ceilf (source.x+aM*ray.x); } // for Y if( source.y<pixel1D.y){ jmin=(am==aym)? 1.0f : ceilf (source.y+am*ray.y); jmax=(aM==ayM)? geo.nVoxelY : floorf(source.y+aM*ray.y); }else{ jmax=(am==aym)? geo.nVoxelY-1.0f : floorf(source.y+am*ray.y); jmin=(aM==ayM)? 0.0f : ceilf (source.y+aM*ray.y); } // for Z if( source.z<pixel1D.z){ kmin=(am==azm)? 1.0f : ceilf (source.z+am*ray.z); kmax=(aM==azM)? geo.nVoxelZ : floorf(source.z+aM*ray.z); }else{ kmax=(am==azm)? geo.nVoxelZ-1.0f : floorf(source.z+am*ray.z); kmin=(aM==azM)? 0.0f : ceilf (source.z+aM*ray.z); } // get intersection point N1. eq(20-21) [(also eq 9-10)] float ax,ay,az; ax=(source.x<pixel1D.x)? __fdividef(imin-source.x,ray.x) : __fdividef(imax-source.x,ray.x); ay=(source.y<pixel1D.y)? __fdividef(jmin-source.y,ray.y) : __fdividef(jmax-source.y,ray.y); az=(source.z<pixel1D.z)? __fdividef(kmin-source.z,ray.z) : __fdividef(kmax-source.z,ray.z); // If its Infinite (i.e. ray is parallel to axis), make sure its positive ax=(isinf(ax))? abs(ax) : ax; ay=(isinf(ay))? abs(ay) : ay; az=(isinf(az))? abs(az) : az; // get index of first intersection. eq (26) and (19) unsigned long i,j,k; float aminc=fminf(fminf(ax,ay),az); i=(unsigned long)floorf(source.x+ (aminc+am)*0.5f*ray.x); j=(unsigned long)floorf(source.y+ (aminc+am)*0.5f*ray.y); k=(unsigned long)floorf(source.z+ (aminc+am)*0.5f*ray.z); // Initialize float ac=am; //eq (28), unit anlges float axu,ayu,azu; axu=__frcp_rd(fabsf(ray.x)); ayu=__frcp_rd(fabsf(ray.y)); azu=__frcp_rd(fabsf(ray.z)); // eq(29), direction of update float iu,ju,ku; iu=(source.x< pixel1D.x)? 1.0f : -1.0f; ju=(source.y< pixel1D.y)? 1.0f : -1.0f; ku=(source.z< pixel1D.z)? 1.0f : -1.0f; float maxlength=__fsqrt_rd(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ); float sum=0.0f; unsigned long Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections // Go iterating over the line, intersection by intersection. 
If double point, no worries, 0 will be computed i+=0.5f; j+=0.5f; k+=0.5f; for (unsigned long ii=0;ii<Np;ii++){ if (ax==aminc){ sum+=(ax-ac)*tex3D<float>(tex, i, j, k); i=i+iu; ac=ax; ax+=axu; }else if(ay==aminc){ sum+=(ay-ac)*tex3D<float>(tex, i, j, k); j=j+ju; ac=ay; ay+=ayu; }else if(az==aminc){ sum+=(az-ac)*tex3D<float>(tex, i, j, k); k=k+ku; ac=az; az+=azu; } aminc=fminf(fminf(ax,ay),az); } detector[idx]=sum*maxlength; } int siddon_ray_projection(float* img, Geometry geo, float** result,float const * const angles,int nangles, const GpuIds& gpuids){ // Prepare for MultiGPU int deviceCount = gpuids.GetLength(); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning thrown) // Check the available devices, and if they are the same if (!gpuids.AreEqualDevices()) { mexWarnMsgIdAndTxt("Ax:Siddon_projection:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed."); } int dev; // Check free memory size_t mem_GPU_global; checkFreeMemory(gpuids, &mem_GPU_global); size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float); size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float); // Does everything fit in the GPUs? const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global; unsigned int splits=1; if (!fits_in_memory) { // Nope nope. // approx free memory we have. We already have left some extra 5% free for internal stuff // we need a second projection memory to combine multi-GPU stuff. size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj; splits=mem_image/mem_free+1;// Ceil of the truncation } Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry)); splitImage(splits,geo,geoArray,nangles); // Allocate axuiliary memory for projections on the GPU to accumulate partial results float ** dProjection_accum; size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float); if (!fits_in_memory){ dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(gpuids[dev]); for (int i = 0; i < 2; ++i){ hipMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj); hipMemset(dProjection_accum[dev*2+i],0,num_bytes_proj); cudaCheckErrors("cudaMallocauxiliarty projections fail"); } } } // This is happening regarthless if the image fits on memory float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); for (int i = 0; i < 2; ++i){ hipMalloc((void**)&dProjection[dev*2+i], num_bytes_proj); hipMemset(dProjection[dev*2+i] ,0,num_bytes_proj); cudaCheckErrors("hipMalloc projections fail"); } } //Pagelock memory for synchronous copy. // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus should have the same attributes. 
int isHostRegisterSupported = 0; #if CUDART_VERSION >= 9020 hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,gpuids[0]); #endif // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to // pin the memory is greater than the lost time in Synchronously launching the memcpys. This is only worth it when the image is too big. #ifndef NO_PINNED_MEMORY if (isHostRegisterSupported & (splits>1 |deviceCount>1)){ hipHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable); } #endif cudaCheckErrors("Error pinning memory"); // auxiliary variables Point3D source, deltaU, deltaV, uvOrigin; Point3D* projParamsArrayHost; hipHostMalloc((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D)); cudaCheckErrors("Error allocating auxiliary constant memory"); // Create Streams for overlapping memcopy and compute int nStreams=deviceCount*2; hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));; for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); for (int i = 0; i < 2; ++i){ hipStreamCreate(&stream[i+dev*2]); } } cudaCheckErrors("Stream creation fail"); int nangles_device=(nangles+deviceCount-1)/deviceCount; int nangles_last_device=(nangles-(deviceCount-1)*nangles_device); unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management. int projection_this_block; hipTextureObject_t *texImg = new hipTextureObject_t[deviceCount]; hipArray **d_cuArrTex = new hipArray*[deviceCount]; for (unsigned int sp=0;sp<splits;sp++){ // Create texture objects for all GPUs size_t linear_idx_start; //First one should always be the same size as all the rest but the last linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ; CreateTexture(gpuids,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp); cudaCheckErrors("Texture object creation fail"); // Prepare kernel lauch variables int divU,divV; divU=PIXEL_SIZE_BLOCK; divV=PIXEL_SIZE_BLOCK; dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1); dim3 block(divU,divV,PROJ_PER_BLOCK); unsigned int proj_global; // Now that we have prepared the image (piece of image) and parameters for kernels // we project for all angles. for (unsigned int i=0; i<noOfKernelCalls; i++) { for (dev=0;dev<deviceCount;dev++){ hipSetDevice(gpuids[dev]); for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){ proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device; if (proj_global>=nangles) break; if ((i*PROJ_PER_BLOCK+j)>=nangles_device) break; geoArray[sp].alpha=angles[proj_global*3]; geoArray[sp].theta=angles[proj_global*3+1]; geoArray[sp].psi =angles[proj_global*3+2]; //precomute distances for faster execution //Precompute per angle constant stuff for speed computeDeltas_Siddon(geoArray[sp],proj_global, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! 
projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[4*j+1]=deltaU; projParamsArrayHost[4*j+2]=deltaV; projParamsArrayHost[4*j+3]=source; } hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*2]); hipStreamSynchronize(stream[dev*2]); cudaCheckErrors("kernel fail"); hipLaunchKernelGGL(( kernelPixelDetector), dim3(grid),dim3(block),0,stream[dev*2], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } // Now that the computation is happening, we need to either prepare the memory for // combining of the projections (splits>1) and start removing previous results. // If our image does not fit in memory then we need to make sure we accumulate previous results too. // This is done in 2 steps: // 1)copy previous results back into GPU // 2)accumulate with current results // The code to take them out is the same as when there are no splits needed if( !fits_in_memory&&sp>0) { // 1) grab previous results and put them in the auxiliary variable dProjection_accum for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(gpuids[dev]); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; hipMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyHostToDevice,stream[dev*2+1]); } // 2) take the results from current compute call and add it to the code in execution. for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(gpuids[dev]); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; hipStreamSynchronize(stream[dev*2+1]); // wait until copy is finished hipLaunchKernelGGL(( vecAddInPlace), dim3((geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS),dim3(MAXTREADS),0,stream[dev*2], dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block); } } // end accumulation case, where the image needs to be split // Now, lets get out the projections from the previous execution of the kernels. if (i>0){ for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(gpuids[dev]); //Global index of FIRST projection on previous set on this GPU proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device; if (dev+1==deviceCount) { //is it the last device? // projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device if (i-1 < noOfKernelCallsLastDev) { // The previous set(block) was not empty. 
projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global); } else { // The previous set was empty. // This happens if deviceCount > PROJ_PER_BLOCK+1. // e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199. // e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7. break; } } else { projection_this_block=PROJ_PER_BLOCK; } hipMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]); } } // Make sure Computation on kernels has finished before we launch the next batch. for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipStreamSynchronize(stream[dev*2]); } } // We still have the last set of projections to get out of GPUs for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(gpuids[dev]); //Global index of FIRST projection on this set on this GPU proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // How many projections are left here? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) hipDeviceSynchronize(); //Not really necesary, but just in case, we los nothing. cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)"); hipMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]); } // Make sure everyone has done their bussiness before the next image split: hipDeviceSynchronize(); } // End image split loop. cudaCheckErrors("Main loop fail"); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDestroyTextureObject(texImg[dev]); hipFreeArray(d_cuArrTex[dev]); } delete[] texImg; texImg = 0; delete[] d_cuArrTex; d_cuArrTex = 0; // Freeing Stage for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipFree(dProjection[dev*2]); hipFree(dProjection[dev*2+1]); } free(dProjection); if(!fits_in_memory){ for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipFree(dProjection_accum[dev*2]); hipFree(dProjection_accum[dev*2+1]); } free(dProjection_accum); } freeGeoArray(splits,geoArray); hipHostFree(projParamsArrayHost); for (int i = 0; i < nStreams; ++i) hipStreamDestroy(stream[i]) ; #ifndef NO_PINNED_MEMORY if (isHostRegisterSupported & (splits>1 |deviceCount>1)){ hipHostUnregister(img); } cudaCheckErrors("hipFree fail"); #endif hipDeviceReset(); return 0; } void CreateTexture(const GpuIds& gpuids,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool alloc) { //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ; const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); const unsigned int num_devices = gpuids.GetLength(); if(alloc){ for (unsigned int dev = 0; dev < num_devices; dev++){ hipSetDevice(gpuids[dev]); //hipArray Descriptor hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); //cuda Array hipMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent); } } for (unsigned int dev = 0; dev < num_devices; dev++){ hipSetDevice(gpuids[dev]); hipMemcpy3DParms copyParams = {0}; //Array creation copyParams.srcPtr = 
make_hipPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_cuArrTex[dev]; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3DAsync(&copyParams); } for (unsigned int dev = 0; dev < num_devices; dev++){ hipSetDevice(gpuids[dev]); hipResourceDesc texRes; memset(&texRes, 0, sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = d_cuArrTex[dev]; hipTextureDesc texDescr; memset(&texDescr, 0, sizeof(hipTextureDesc)); texDescr.normalizedCoords = false; texDescr.filterMode = hipFilterModePoint; texDescr.addressMode[0] = hipAddressModeBorder; texDescr.addressMode[1] = hipAddressModeBorder; texDescr.addressMode[2] = hipAddressModeBorder; texDescr.readMode = hipReadModeElementType; hipCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL); } for (unsigned int dev = 0; dev < num_devices; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } cudaCheckErrors("Texture object creation fail"); } /* This code generates the geometries needed to split the image properly in * cases where the entire image does not fit in the memory of the GPU **/ void splitImage(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){ unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible for(unsigned int sp=0;sp<splits;sp++){ geoArray[sp]=geo; // All of them are splitsize, but the last one, possible geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp; geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ; // We need to redefine the offsets, as now each subimage is not aligned in the origin. geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float)); for (unsigned int i=0;i<nangles;i++){ geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2; } } } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas_Siddon(Geometry geo,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO[i]; S.y=0; S.z=0; //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours. // The obkjective is to get a position of the detector in a coordinate system where: // 1-units are voxel size (in each direction can be different) // 2-The image has the its first voxel at (0,0,0) // 3-The image never rotates // To do that, we need to compute the "deltas" the detector, or "by how much // (in new xyz) does the voxels change when and index is added". To do that // several geometric steps needs to be changed //1.Roll,pitch,jaw // The detector can have a small rotation. // according to //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706. 
// Only the Z rotation will have a big influence in the image quality when they are small. // Still all rotations are supported // To roll pitch jaw, the detector has to be in centered in OXYZ. P.x=0;Pu0.x=0;Pv0.x=0; // Roll pitch yaw rollPitchYaw(geo,i,&P); rollPitchYaw(geo,i,&Pu0); rollPitchYaw(geo,i,&Pv0); //Now ltes translate the points where they should be: P.x=P.x-(geo.DSD[i]-geo.DSO[i]); Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]); Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]); //1: Offset detector //S doesnt need to chagne //3: Rotate (around z)! Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x; Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i]; Pfinalu0.x=Pu0.x; Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i]; Pfinalv0.x=Pv0.x; Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i]; eulerZYZ(geo,&Pfinal); eulerZYZ(geo,&Pfinalu0); eulerZYZ(geo,&Pfinalv0); eulerZYZ(geo,&S); //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2; S.x =S.x+geo.sVoxelX/2; S.y =S.y+geo.sVoxelY/2; S.z =S.z +geo.sVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ; //mexPrintf("COR: %f \n",geo.COR[i]); //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S.x+=CORx; S.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S; } #ifndef PROJECTION_HPP float maxDistanceCubeXY(Geometry geo, float alpha,int i){ /////////// // Compute initial "t" so we access safely as less as out of bounds as possible. 
////////// float maxCubX,maxCubY; // Forgetting Z, compute max distance: diagonal+offset maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX; maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY; return geo.DSO[i]/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY); } void rollPitchYaw(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->z=-sin(geo.dPitch[i])*auxPoint.x +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } void eulerZYZ(Geometry geo, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+ cos(geo.alpha)*sin(geo.theta)*auxPoint.z; point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+ sin(geo.alpha)*sin(geo.theta)*auxPoint.z; point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+ sin(geo.theta)*sin(geo.psi)*auxPoint.y+ cos(geo.theta)*auxPoint.z; } //______________________________________________________________________________ // // Function: freeGeoArray // // Description: Frees the memory from the geometry array for multiGPU. //______________________________________________________________________________ void freeGeoArray(unsigned int splits,Geometry* geoArray){ for(unsigned int sp=0;sp<splits;sp++){ free(geoArray[sp].offOrigZ); } free(geoArray); } //______________________________________________________________________________ // // Function: checkFreeMemory // // Description: check available memory on devices //______________________________________________________________________________ void checkFreeMemory(const GpuIds& gpuids, size_t *mem_GPU_global){ size_t memfree; size_t memtotal; const int deviceCount = gpuids.GetLength(); for (int dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUmemory","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. } #endif
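// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: a host-only example of
// the memory-split arithmetic used by checkFreeMemory()/splitImage() above --
// decide whether the volume fits next to two projection buffers per block and,
// if not, ceil-divide the Z dimension. All numeric values (a 1024^3 float
// volume, a 1024x1024 detector, 9 projections per block, ~4 GB free) are
// made-up assumptions for the example, not measurements from a real device.
#include <cstdio>
#include <cstddef>

int main() {
    const size_t nVoxelX = 1024, nVoxelY = 1024, nVoxelZ = 1024;
    const size_t nDetecU = 1024, nDetecV = 1024;
    const size_t projPerBlock = 9;                        // mirrors PROJ_PER_BLOCK

    size_t mem_image = nVoxelX * nVoxelY * nVoxelZ * sizeof(float);
    size_t mem_proj  = nDetecU * nDetecV * sizeof(float);
    size_t mem_GPU_global = (size_t)(4.0e9 * 0.95);       // assumed free memory after the 5% safety margin

    bool fits_in_memory = mem_image + 2 * projPerBlock * mem_proj < mem_GPU_global;
    unsigned int splits = 1;
    if (!fits_in_memory) {
        // Keep room for the accumulation buffers, then take "ceil of the truncation".
        size_t mem_free = mem_GPU_global - 4 * projPerBlock * mem_proj;
        splits = (unsigned int)(mem_image / mem_free + 1);
    }
    size_t splitsizeZ = (nVoxelZ + splits - 1) / splits;  // voxels in Z per sub-image

    printf("image bytes = %zu, fits = %d, splits = %u, Z voxels per split = %zu\n",
           mem_image, (int)fits_in_memory, splits, splitsizeZ);
    return 0;
}
// ---------------------------------------------------------------------------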
9b5b1361c4401f3316abcbb5e7619706790fd3db.cu
/*------------------------------------------------------------------------- * * CUDA functions for ray-voxel intersection based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It usesthe so-called * Jacobs algorithm to compute efficiently the length of the x-rays over * voxel space. * * CODE by Ander Biguri * Sepideh Hatamikia (arbitrary rotation) * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "Siddon_projection.hpp" #include "TIGRE_common.hpp" #include <math.h> #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("Ax:Siddon_projection",cudaGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 #define PROJ_PER_BLOCK 9 #define PIXEL_SIZE_BLOCK 9 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTexture(const GpuIds& gpuids,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool alloc); __constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device __global__ void vecAddInPlace(float *a, float *b, unsigned long n) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (idx < n) a[idx] = a[idx] + b[idx]; } __global__ void kernelPixelDetector( Geometry geo, float* detector, const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex){ unsigned long long u = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long v = blockIdx.y * blockDim.y + threadIdx.y; unsigned long long projNumber=threadIdx.z; if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK) return; #if IS_FOR_MATLAB_TIGRE size_t idx = (size_t)(u * (unsigned long long)geo.nDetecV + v)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ; #else size_t idx = (size_t)(v * (unsigned long long)geo.nDetecU + u)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ; #endif unsigned long indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array (for a given GPU) if(indAlpha>=totalNoOfProjections) return; Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaU = projParamsArrayDev[4*projNumber+1]; Point3D deltaV = projParamsArrayDev[4*projNumber+2]; Point3D source = projParamsArrayDev[4*projNumber+3]; /////// Get coordinates XYZ of pixel UV unsigned long pixelV = geo.nDetecV-v-1; unsigned long pixelU = u; Point3D pixel1D; pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); /////// // Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516 ////// // Also called Jacobs algorithms Point3D ray; // vector of Xray ray.x=pixel1D.x-source.x; ray.y=pixel1D.y-source.y; ray.z=pixel1D.z-source.z; float eps=0.001; ray.x=(fabsf(ray.x)<eps)? 0 : ray.x; ray.y=(fabsf(ray.y)<eps)? 0 : ray.y; ray.z=(fabsf(ray.z)<eps)? 0 : ray.z; // This variables are ommited because // bx,by,bz ={0,0,0} // dx,dy,dz ={1,1,1} // compute parameter values for x-ray parametric equation. 
eq(3-10) float axm,aym,azm; float axM,ayM,azM; // In the paper Nx= number of X planes-> Nvoxel+1 axm=fminf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x)); aym=fminf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y)); azm=fminf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z)); axM=fmaxf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x)); ayM=fmaxf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y)); azM=fmaxf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z)); float am=fmaxf(fmaxf(axm,aym),azm); float aM=fminf(fminf(axM,ayM),azM); // line intersects voxel space -> am<aM if (am>=aM) detector[idx]=0; // Compute max/min image INDEX for intersection eq(11-19) // Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version float imin,imax,jmin,jmax,kmin,kmax; // for X if( source.x<pixel1D.x){ imin=(am==axm)? 1.0f : ceilf (source.x+am*ray.x); imax=(aM==axM)? geo.nVoxelX : floorf(source.x+aM*ray.x); }else{ imax=(am==axm)? geo.nVoxelX-1.0f : floorf(source.x+am*ray.x); imin=(aM==axM)? 0.0f : ceilf (source.x+aM*ray.x); } // for Y if( source.y<pixel1D.y){ jmin=(am==aym)? 1.0f : ceilf (source.y+am*ray.y); jmax=(aM==ayM)? geo.nVoxelY : floorf(source.y+aM*ray.y); }else{ jmax=(am==aym)? geo.nVoxelY-1.0f : floorf(source.y+am*ray.y); jmin=(aM==ayM)? 0.0f : ceilf (source.y+aM*ray.y); } // for Z if( source.z<pixel1D.z){ kmin=(am==azm)? 1.0f : ceilf (source.z+am*ray.z); kmax=(aM==azM)? geo.nVoxelZ : floorf(source.z+aM*ray.z); }else{ kmax=(am==azm)? geo.nVoxelZ-1.0f : floorf(source.z+am*ray.z); kmin=(aM==azM)? 0.0f : ceilf (source.z+aM*ray.z); } // get intersection point N1. eq(20-21) [(also eq 9-10)] float ax,ay,az; ax=(source.x<pixel1D.x)? __fdividef(imin-source.x,ray.x) : __fdividef(imax-source.x,ray.x); ay=(source.y<pixel1D.y)? __fdividef(jmin-source.y,ray.y) : __fdividef(jmax-source.y,ray.y); az=(source.z<pixel1D.z)? __fdividef(kmin-source.z,ray.z) : __fdividef(kmax-source.z,ray.z); // If its Infinite (i.e. ray is parallel to axis), make sure its positive ax=(isinf(ax))? abs(ax) : ax; ay=(isinf(ay))? abs(ay) : ay; az=(isinf(az))? abs(az) : az; // get index of first intersection. eq (26) and (19) unsigned long i,j,k; float aminc=fminf(fminf(ax,ay),az); i=(unsigned long)floorf(source.x+ (aminc+am)*0.5f*ray.x); j=(unsigned long)floorf(source.y+ (aminc+am)*0.5f*ray.y); k=(unsigned long)floorf(source.z+ (aminc+am)*0.5f*ray.z); // Initialize float ac=am; //eq (28), unit anlges float axu,ayu,azu; axu=__frcp_rd(fabsf(ray.x)); ayu=__frcp_rd(fabsf(ray.y)); azu=__frcp_rd(fabsf(ray.z)); // eq(29), direction of update float iu,ju,ku; iu=(source.x< pixel1D.x)? 1.0f : -1.0f; ju=(source.y< pixel1D.y)? 1.0f : -1.0f; ku=(source.z< pixel1D.z)? 1.0f : -1.0f; float maxlength=__fsqrt_rd(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ); float sum=0.0f; unsigned long Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections // Go iterating over the line, intersection by intersection. 
If double point, no worries, 0 will be computed i+=0.5f; j+=0.5f; k+=0.5f; for (unsigned long ii=0;ii<Np;ii++){ if (ax==aminc){ sum+=(ax-ac)*tex3D<float>(tex, i, j, k); i=i+iu; ac=ax; ax+=axu; }else if(ay==aminc){ sum+=(ay-ac)*tex3D<float>(tex, i, j, k); j=j+ju; ac=ay; ay+=ayu; }else if(az==aminc){ sum+=(az-ac)*tex3D<float>(tex, i, j, k); k=k+ku; ac=az; az+=azu; } aminc=fminf(fminf(ax,ay),az); } detector[idx]=sum*maxlength; } int siddon_ray_projection(float* img, Geometry geo, float** result,float const * const angles,int nangles, const GpuIds& gpuids){ // Prepare for MultiGPU int deviceCount = gpuids.GetLength(); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning thrown) // Check the available devices, and if they are the same if (!gpuids.AreEqualDevices()) { mexWarnMsgIdAndTxt("Ax:Siddon_projection:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed."); } int dev; // Check free memory size_t mem_GPU_global; checkFreeMemory(gpuids, &mem_GPU_global); size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float); size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float); // Does everything fit in the GPUs? const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global; unsigned int splits=1; if (!fits_in_memory) { // Nope nope. // approx free memory we have. We already have left some extra 5% free for internal stuff // we need a second projection memory to combine multi-GPU stuff. size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj; splits=mem_image/mem_free+1;// Ceil of the truncation } Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry)); splitImage(splits,geo,geoArray,nangles); // Allocate axuiliary memory for projections on the GPU to accumulate partial results float ** dProjection_accum; size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float); if (!fits_in_memory){ dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(gpuids[dev]); for (int i = 0; i < 2; ++i){ cudaMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj); cudaMemset(dProjection_accum[dev*2+i],0,num_bytes_proj); cudaCheckErrors("cudaMallocauxiliarty projections fail"); } } } // This is happening regarthless if the image fits on memory float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); for (int i = 0; i < 2; ++i){ cudaMalloc((void**)&dProjection[dev*2+i], num_bytes_proj); cudaMemset(dProjection[dev*2+i] ,0,num_bytes_proj); cudaCheckErrors("cudaMalloc projections fail"); } } //Pagelock memory for synchronous copy. // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus should have the same attributes. 
int isHostRegisterSupported = 0; #if CUDART_VERSION >= 9020 cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]); #endif // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to // pin the memory is greater than the lost time in Synchronously launching the memcpys. This is only worth it when the image is too big. #ifndef NO_PINNED_MEMORY if (isHostRegisterSupported & (splits>1 |deviceCount>1)){ cudaHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable); } #endif cudaCheckErrors("Error pinning memory"); // auxiliary variables Point3D source, deltaU, deltaV, uvOrigin; Point3D* projParamsArrayHost; cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D)); cudaCheckErrors("Error allocating auxiliary constant memory"); // Create Streams for overlapping memcopy and compute int nStreams=deviceCount*2; cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));; for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); for (int i = 0; i < 2; ++i){ cudaStreamCreate(&stream[i+dev*2]); } } cudaCheckErrors("Stream creation fail"); int nangles_device=(nangles+deviceCount-1)/deviceCount; int nangles_last_device=(nangles-(deviceCount-1)*nangles_device); unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management. int projection_this_block; cudaTextureObject_t *texImg = new cudaTextureObject_t[deviceCount]; cudaArray **d_cuArrTex = new cudaArray*[deviceCount]; for (unsigned int sp=0;sp<splits;sp++){ // Create texture objects for all GPUs size_t linear_idx_start; //First one should always be the same size as all the rest but the last linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ; CreateTexture(gpuids,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp); cudaCheckErrors("Texture object creation fail"); // Prepare kernel lauch variables int divU,divV; divU=PIXEL_SIZE_BLOCK; divV=PIXEL_SIZE_BLOCK; dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1); dim3 block(divU,divV,PROJ_PER_BLOCK); unsigned int proj_global; // Now that we have prepared the image (piece of image) and parameters for kernels // we project for all angles. for (unsigned int i=0; i<noOfKernelCalls; i++) { for (dev=0;dev<deviceCount;dev++){ cudaSetDevice(gpuids[dev]); for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){ proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device; if (proj_global>=nangles) break; if ((i*PROJ_PER_BLOCK+j)>=nangles_device) break; geoArray[sp].alpha=angles[proj_global*3]; geoArray[sp].theta=angles[proj_global*3+1]; geoArray[sp].psi =angles[proj_global*3+2]; //precomute distances for faster execution //Precompute per angle constant stuff for speed computeDeltas_Siddon(geoArray[sp],proj_global, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! 
projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[4*j+1]=deltaU; projParamsArrayHost[4*j+2]=deltaV; projParamsArrayHost[4*j+3]=source; } cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*2]); cudaStreamSynchronize(stream[dev*2]); cudaCheckErrors("kernel fail"); kernelPixelDetector<<<grid,block,0,stream[dev*2]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } // Now that the computation is happening, we need to either prepare the memory for // combining of the projections (splits>1) and start removing previous results. // If our image does not fit in memory then we need to make sure we accumulate previous results too. // This is done in 2 steps: // 1)copy previous results back into GPU // 2)accumulate with current results // The code to take them out is the same as when there are no splits needed if( !fits_in_memory&&sp>0) { // 1) grab previous results and put them in the auxiliary variable dProjection_accum for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(gpuids[dev]); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; cudaMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyHostToDevice,stream[dev*2+1]); } // 2) take the results from current compute call and add it to the code in execution. for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(gpuids[dev]); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; cudaStreamSynchronize(stream[dev*2+1]); // wait until copy is finished vecAddInPlace<<<(geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS,MAXTREADS,0,stream[dev*2]>>>(dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block); } } // end accumulation case, where the image needs to be split // Now, lets get out the projections from the previous execution of the kernels. if (i>0){ for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(gpuids[dev]); //Global index of FIRST projection on previous set on this GPU proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device; if (dev+1==deviceCount) { //is it the last device? // projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device if (i-1 < noOfKernelCallsLastDev) { // The previous set(block) was not empty. 
projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global); } else { // The previous set was empty. // This happens if deviceCount > PROJ_PER_BLOCK+1. // e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199. // e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7. break; } } else { projection_this_block=PROJ_PER_BLOCK; } cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]); } } // Make sure Computation on kernels has finished before we launch the next batch. for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaStreamSynchronize(stream[dev*2]); } } // We still have the last set of projections to get out of GPUs for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(gpuids[dev]); //Global index of FIRST projection on this set on this GPU proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // How many projections are left here? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) cudaDeviceSynchronize(); //Not really necesary, but just in case, we los nothing. cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)"); cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]); } // Make sure everyone has done their bussiness before the next image split: cudaDeviceSynchronize(); } // End image split loop. cudaCheckErrors("Main loop fail"); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaDestroyTextureObject(texImg[dev]); cudaFreeArray(d_cuArrTex[dev]); } delete[] texImg; texImg = 0; delete[] d_cuArrTex; d_cuArrTex = 0; // Freeing Stage for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaFree(dProjection[dev*2]); cudaFree(dProjection[dev*2+1]); } free(dProjection); if(!fits_in_memory){ for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaFree(dProjection_accum[dev*2]); cudaFree(dProjection_accum[dev*2+1]); } free(dProjection_accum); } freeGeoArray(splits,geoArray); cudaFreeHost(projParamsArrayHost); for (int i = 0; i < nStreams; ++i) cudaStreamDestroy(stream[i]) ; #ifndef NO_PINNED_MEMORY if (isHostRegisterSupported & (splits>1 |deviceCount>1)){ cudaHostUnregister(img); } cudaCheckErrors("cudaFree fail"); #endif cudaDeviceReset(); return 0; } void CreateTexture(const GpuIds& gpuids,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool alloc) { //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ; const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); const unsigned int num_devices = gpuids.GetLength(); if(alloc){ for (unsigned int dev = 0; dev < num_devices; dev++){ cudaSetDevice(gpuids[dev]); //cudaArray Descriptor cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); //cuda Array cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent); } } for (unsigned int dev = 0; dev < num_devices; dev++){ cudaSetDevice(gpuids[dev]); cudaMemcpy3DParms copyParams = {0}; 
//Array creation copyParams.srcPtr = make_cudaPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_cuArrTex[dev]; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3DAsync(&copyParams); } for (unsigned int dev = 0; dev < num_devices; dev++){ cudaSetDevice(gpuids[dev]); cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = d_cuArrTex[dev]; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = false; texDescr.filterMode = cudaFilterModePoint; texDescr.addressMode[0] = cudaAddressModeBorder; texDescr.addressMode[1] = cudaAddressModeBorder; texDescr.addressMode[2] = cudaAddressModeBorder; texDescr.readMode = cudaReadModeElementType; cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL); } for (unsigned int dev = 0; dev < num_devices; dev++){ cudaSetDevice(gpuids[dev]); cudaDeviceSynchronize(); } cudaCheckErrors("Texture object creation fail"); } /* This code generates the geometries needed to split the image properly in * cases where the entire image does not fit in the memory of the GPU **/ void splitImage(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){ unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible for(unsigned int sp=0;sp<splits;sp++){ geoArray[sp]=geo; // All of them are splitsize, but the last one, possible geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp; geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ; // We need to redefine the offsets, as now each subimage is not aligned in the origin. geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float)); for (unsigned int i=0;i<nangles;i++){ geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2; } } } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas_Siddon(Geometry geo,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO[i]; S.y=0; S.z=0; //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours. // The obkjective is to get a position of the detector in a coordinate system where: // 1-units are voxel size (in each direction can be different) // 2-The image has the its first voxel at (0,0,0) // 3-The image never rotates // To do that, we need to compute the "deltas" the detector, or "by how much // (in new xyz) does the voxels change when and index is added". To do that // several geometric steps needs to be changed //1.Roll,pitch,jaw // The detector can have a small rotation. // according to //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. 
Med Phys. 2006 Jun;33(6):1695-706. // Only the Z rotation will have a big influence in the image quality when they are small. // Still all rotations are supported // To roll pitch jaw, the detector has to be in centered in OXYZ. P.x=0;Pu0.x=0;Pv0.x=0; // Roll pitch yaw rollPitchYaw(geo,i,&P); rollPitchYaw(geo,i,&Pu0); rollPitchYaw(geo,i,&Pv0); //Now ltes translate the points where they should be: P.x=P.x-(geo.DSD[i]-geo.DSO[i]); Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]); Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]); //1: Offset detector //S doesnt need to chagne //3: Rotate (around z)! Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x; Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i]; Pfinalu0.x=Pu0.x; Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i]; Pfinalv0.x=Pv0.x; Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i]; eulerZYZ(geo,&Pfinal); eulerZYZ(geo,&Pfinalu0); eulerZYZ(geo,&Pfinalv0); eulerZYZ(geo,&S); //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2; S.x =S.x+geo.sVoxelX/2; S.y =S.y+geo.sVoxelY/2; S.z =S.z +geo.sVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ; //mexPrintf("COR: %f \n",geo.COR[i]); //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S.x+=CORx; S.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S; } #ifndef PROJECTION_HPP float maxDistanceCubeXY(Geometry geo, float alpha,int i){ /////////// // Compute initial "t" so we access safely as less as out of bounds as possible. 
////////// float maxCubX,maxCubY; // Forgetting Z, compute max distance: diagonal+offset maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX; maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY; return geo.DSO[i]/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY); } void rollPitchYaw(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->z=-sin(geo.dPitch[i])*auxPoint.x +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } void eulerZYZ(Geometry geo, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+ cos(geo.alpha)*sin(geo.theta)*auxPoint.z; point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+ sin(geo.alpha)*sin(geo.theta)*auxPoint.z; point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+ sin(geo.theta)*sin(geo.psi)*auxPoint.y+ cos(geo.theta)*auxPoint.z; } //______________________________________________________________________________ // // Function: freeGeoArray // // Description: Frees the memory from the geometry array for multiGPU. //______________________________________________________________________________ void freeGeoArray(unsigned int splits,Geometry* geoArray){ for(unsigned int sp=0;sp<splits;sp++){ free(geoArray[sp].offOrigZ); } free(geoArray); } //______________________________________________________________________________ // // Function: checkFreeMemory // // Description: check available memory on devices //______________________________________________________________________________ void checkFreeMemory(const GpuIds& gpuids, size_t *mem_GPU_global){ size_t memfree; size_t memtotal; const int deviceCount = gpuids.GetLength(); for (int dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUmemory","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. } #endif
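// ---------------------------------------------------------------------------
// Illustrative sketch, not from the TIGRE sources: a minimal CPU-side version
// of the entry/exit ("am"/"aM") computation that kernelPixelDetector performs
// with fminf/fmaxf, i.e. the eq(3-10) step of the Siddon/Jacobs ray tracing.
// The grid is assumed to span [0,nx]x[0,ny]x[0,nz] in voxel units, matching
// the kernel's bx=by=bz=0, dx=dy=dz=1 convention. Unlike the kernel, this
// sketch does not special-case near-zero ray components, so it assumes all
// three direction components are nonzero. The geometry in main() is made up.
#include <algorithm>
#include <cstdio>

struct Ray { float sx, sy, sz, rx, ry, rz; };  // source position, (pixel - source) direction

static bool entryExit(const Ray& r, float nx, float ny, float nz, float& am, float& aM) {
    // Parameter values where the ray crosses the first and last plane of each axis.
    float axm = std::min((0.f - r.sx) / r.rx, (nx - r.sx) / r.rx);
    float axM = std::max((0.f - r.sx) / r.rx, (nx - r.sx) / r.rx);
    float aym = std::min((0.f - r.sy) / r.ry, (ny - r.sy) / r.ry);
    float ayM = std::max((0.f - r.sy) / r.ry, (ny - r.sy) / r.ry);
    float azm = std::min((0.f - r.sz) / r.rz, (nz - r.sz) / r.rz);
    float azM = std::max((0.f - r.sz) / r.rz, (nz - r.sz) / r.rz);
    am = std::max(std::max(axm, aym), azm);    // latest entry over the three slabs
    aM = std::min(std::min(axM, ayM), azM);    // earliest exit
    return am < aM;                            // the ray crosses the voxel grid iff am < aM
}

int main() {
    Ray r{-100.f, 32.f, 32.f, 200.f, 1.f, 1.f};  // roughly along +x through a 64^3 grid
    float am, aM;
    bool hits = entryExit(r, 64.f, 64.f, 64.f, am, aM);
    printf("hits = %d, am = %f, aM = %f\n", (int)hits, am, aM);
    return 0;
}
// ---------------------------------------------------------------------------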
1d4348b85687f9326000030028485460b4059962.hip
// !!! This is a file automatically generated by hipify!!! #define FP double #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <math.h> /* Utility function * print out matrix * for debug use only*/ void print(FP *a, int n, int m) { for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { printf("%.6e ", a[i*m+j]); } printf("\n"); } } __global__ void gpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m, int TW) { extern __shared__ FP bigarray[]; FP *atile = &bigarray[0], *btile = &bigarray[TW*TW]; //__shared__ FP atile[TW][TW], btile[TW][TW]; int tx = threadIdx.x, ty = threadIdx.y; int col = tx + blockDim.x * blockIdx.x; int row = ty + blockDim.y * blockIdx.y; FP cvalue = 0.; int tile_num = (int) ceil((double)p/TW); //printf("ceil:%d\n", tile_num); // full size tile for(int i=0; i<tile_num; i++) { //atile[ty][tx] = a[row*p + i*TW + tx]; //load elements into atile //btile[ty][tx] = b[(i*TW+ty)*m + col]; //load elements into btile atile[ty*TW+tx] = a[row*p + i*TW + tx]; btile[ty*TW+tx] = b[(i*TW+ty)*m + col]; __syncthreads(); int boarder = ((p%TW) !=0 && i == tile_num-1) ? p % TW : TW; for(int indexa=ty*TW, indexb=tx; indexa<ty*TW+boarder; indexa++,indexb+=TW) { cvalue += atile[indexa] * btile[indexb]; } //for(int j=0; j<boarder; j++) { //cvalue += atile[ty][j] * btile[j][tx]; //} __syncthreads(); } if(row < n && col < m){ c[row * m + col] = cvalue; } } void kij(FP *a, FP *b, FP *c, int n, int p, int m) { for(int k = 0; k < p; k++) { for(int i = 0; i < n; i++) { //FP r = a[i][k]; FP r = a[i * p + k]; const int baseC = i * m; const int baseB = k * m; for(int j = 0; j < m; j++) c[baseC + j] -= r * b[baseB + j]; } } } void cpu_matrixmult(FP *a,FP *b, FP *c, int n) { int index, indexa, indexb; FP cvalue; for(int col=0;col < n; col++) for(int row=0;row < n; row++) { indexb = col; index = row * n + col; cvalue = 0.; for (indexa = row*n; indexa < (row*n + n); indexa++, indexb+=n) cvalue += a[indexa]*b[indexb]; c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations. 
} } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int gpunum = 0; // Device number to use int Grid_Dim_X = 1, Grid_Dim_Y = 1; //Grid dimension, x and y int Block_Dim_X = 1, Block_Dim_Y = 1; //Block dimension, x and y, square int n, p, m; // matrix dimension FP *a,*b,*c; FP *dev_a, *dev_b, *dev_c; hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also hipError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = hipGetDeviceCount(&gpucount); if (errorcode == hipErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } else { printf("Device count = %d\n",gpucount); } if ((argc<6) || (argc>7)) { printf("Usage: matmul <matrix dim n> <matrix dim p> <matrix dim m> <block dim x> <block dim y> [<dev num>]\n"); exit (-1); } n = atoi(argv[1]); p = atoi(argv[2]); m = atoi(argv[3]); Block_Dim_X = atoi(argv[4]); Block_Dim_Y = atoi(argv[5]); if (Block_Dim_X*Block_Dim_Y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } Grid_Dim_X = (int) ceil((double) m / Block_Dim_X); Grid_Dim_Y = (int) ceil((double) n / Block_Dim_Y); //printf("Dimx %d\n",Grid_Dim_X); //printf("Dimy %d\n",Grid_Dim_Y); if (Grid_Dim_X * Grid_Dim_Y * Block_Dim_X * Block_Dim_Y < n * m) { printf("Error, number of threads in x/y dimensions less than number of array elements\n"); exit (-1); } if (argc==7) { gpunum = atoi(argv[6]); // Device number if ((gpunum > 2) || (gpunum < 0)) { printf("Error, Device number must be 0, 1, or 2\n"); exit (-1); } } hipSetDevice(gpunum); printf("Using device %d\n",gpunum); printf("Matrix Dimension = %d %d %d\n",n, p, m); printf("Block_Dim_X = %d, Block_Dim_Y = %d, Grid_Dim_X = %d, Grid_Dim_Y = %d\n", Block_Dim_X, Block_Dim_Y, Grid_Dim_X, Grid_Dim_Y); dim3 Grid(Grid_Dim_X, Grid_Dim_Y); //Grid structure dim3 Block(Block_Dim_X, Block_Dim_Y); //Block structure a = (FP*) malloc(n * p * sizeof(FP)); // dynamically allocated memory for arrays on host b = (FP*) malloc(p * m * sizeof(FP)); c = (FP*) malloc(n * m * sizeof(FP)); // results from GPU srand(12345); for(i=0; i<n; i++) for(j=0; j < p; j++) { a[i * p + j] = (FP) rand() / (FP) RAND_MAX; // a[i * p + j] = (FP) i+j; // may be helpful for debugging } for(i=0; i<p; i++) for(j=0; j<m; j++) { b[i * m + j] = (FP) rand() / (FP) RAND_MAX; // b[i * n + j] = (FP) i+j; // may be helpful for debugging } //printf("A\n"); //print(a, n, p); //printf("B\n"); //print(b, p, m); // ------------- COMPUTATION DONE ON GPU ---------------------------- hipMalloc((void**)&dev_a, n * p * sizeof(FP)); // allocate memory on device hipMalloc((void**)&dev_b, p * m * sizeof(FP)); hipMalloc((void**)&dev_c, n * m * sizeof(FP)); hipMemcpy(dev_a, a , n * p * sizeof(FP), hipMemcpyHostToDevice); hipMemcpy(dev_b, b , p * m * sizeof(FP), hipMemcpyHostToDevice); hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); // hipEventSynchronize(start); // not needed hipFuncSetCacheConfig(gpu_matrixmult, hipFuncCachePreferShared); const int TW = Block_Dim_X; size_t Ns = 2 * TW * TW * sizeof(FP); hipLaunchKernelGGL(( gpu_matrixmult), dim3(Grid),dim3(Block), Ns, 0, dev_a, dev_b, dev_c, n, p, m, TW); hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); hipMemcpy(c,dev_c, n * m * sizeof(FP), hipMemcpyDeviceToHost); printf("Time to calculate results on GPU: %f 
ms.\n", elapsed_time_ms); // exec. time //print(c, n, m); // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) hipEventRecord(start, 0); // use same timing // hipEventSynchronize(start); // not needed //cpu_matrixmult(a,b,c, n); // do calculation on host (NOTE: This computes the diff with GPU result.) //kij(a, b, c, n, p, m); // hipEventRecord(stop, 0); // instrument code to measue end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //print(c, n, m); // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0; i<n*p; i++) { ai = (double) a[i]; suma += ai * ai; } for(i=0; i<p*m; i++) { bi = (double) b[i]; sumb += bi * bi; } for(i=0; i<n*m; i++) { ci = (double) c[i]; sumc += ci * ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(sqrt(n*m)*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); // -------------- clean up --------------------------------------- free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
1d4348b85687f9326000030028485460b4059962.cu
#define FP double #include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <math.h> /* Utility function * print out matrix * for debug use only*/ void print(FP *a, int n, int m) { for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { printf("%.6e ", a[i*m+j]); } printf("\n"); } } __global__ void gpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m, int TW) { extern __shared__ FP bigarray[]; FP *atile = &bigarray[0], *btile = &bigarray[TW*TW]; //__shared__ FP atile[TW][TW], btile[TW][TW]; int tx = threadIdx.x, ty = threadIdx.y; int col = tx + blockDim.x * blockIdx.x; int row = ty + blockDim.y * blockIdx.y; FP cvalue = 0.; int tile_num = (int) ceil((double)p/TW); //printf("ceil:%d\n", tile_num); // full size tile for(int i=0; i<tile_num; i++) { //atile[ty][tx] = a[row*p + i*TW + tx]; //load elements into atile //btile[ty][tx] = b[(i*TW+ty)*m + col]; //load elements into btile atile[ty*TW+tx] = a[row*p + i*TW + tx]; btile[ty*TW+tx] = b[(i*TW+ty)*m + col]; __syncthreads(); int boarder = ((p%TW) !=0 && i == tile_num-1) ? p % TW : TW; for(int indexa=ty*TW, indexb=tx; indexa<ty*TW+boarder; indexa++,indexb+=TW) { cvalue += atile[indexa] * btile[indexb]; } //for(int j=0; j<boarder; j++) { //cvalue += atile[ty][j] * btile[j][tx]; //} __syncthreads(); } if(row < n && col < m){ c[row * m + col] = cvalue; } } void kij(FP *a, FP *b, FP *c, int n, int p, int m) { for(int k = 0; k < p; k++) { for(int i = 0; i < n; i++) { //FP r = a[i][k]; FP r = a[i * p + k]; const int baseC = i * m; const int baseB = k * m; for(int j = 0; j < m; j++) c[baseC + j] -= r * b[baseB + j]; } } } void cpu_matrixmult(FP *a,FP *b, FP *c, int n) { int index, indexa, indexb; FP cvalue; for(int col=0;col < n; col++) for(int row=0;row < n; row++) { indexb = col; index = row * n + col; cvalue = 0.; for (indexa = row*n; indexa < (row*n + n); indexa++, indexb+=n) cvalue += a[indexa]*b[indexb]; c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations. 
} } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int gpunum = 0; // Device number to use int Grid_Dim_X = 1, Grid_Dim_Y = 1; //Grid dimension, x and y int Block_Dim_X = 1, Block_Dim_Y = 1; //Block dimension, x and y, square int n, p, m; // matrix dimension FP *a,*b,*c; FP *dev_a, *dev_b, *dev_c; cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = cudaGetDeviceCount(&gpucount); if (errorcode == cudaErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } else { printf("Device count = %d\n",gpucount); } if ((argc<6) || (argc>7)) { printf("Usage: matmul <matrix dim n> <matrix dim p> <matrix dim m> <block dim x> <block dim y> [<dev num>]\n"); exit (-1); } n = atoi(argv[1]); p = atoi(argv[2]); m = atoi(argv[3]); Block_Dim_X = atoi(argv[4]); Block_Dim_Y = atoi(argv[5]); if (Block_Dim_X*Block_Dim_Y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } Grid_Dim_X = (int) ceil((double) m / Block_Dim_X); Grid_Dim_Y = (int) ceil((double) n / Block_Dim_Y); //printf("Dimx %d\n",Grid_Dim_X); //printf("Dimy %d\n",Grid_Dim_Y); if (Grid_Dim_X * Grid_Dim_Y * Block_Dim_X * Block_Dim_Y < n * m) { printf("Error, number of threads in x/y dimensions less than number of array elements\n"); exit (-1); } if (argc==7) { gpunum = atoi(argv[6]); // Device number if ((gpunum > 2) || (gpunum < 0)) { printf("Error, Device number must be 0, 1, or 2\n"); exit (-1); } } cudaSetDevice(gpunum); printf("Using device %d\n",gpunum); printf("Matrix Dimension = %d %d %d\n",n, p, m); printf("Block_Dim_X = %d, Block_Dim_Y = %d, Grid_Dim_X = %d, Grid_Dim_Y = %d\n", Block_Dim_X, Block_Dim_Y, Grid_Dim_X, Grid_Dim_Y); dim3 Grid(Grid_Dim_X, Grid_Dim_Y); //Grid structure dim3 Block(Block_Dim_X, Block_Dim_Y); //Block structure a = (FP*) malloc(n * p * sizeof(FP)); // dynamically allocated memory for arrays on host b = (FP*) malloc(p * m * sizeof(FP)); c = (FP*) malloc(n * m * sizeof(FP)); // results from GPU srand(12345); for(i=0; i<n; i++) for(j=0; j < p; j++) { a[i * p + j] = (FP) rand() / (FP) RAND_MAX; // a[i * p + j] = (FP) i+j; // may be helpful for debugging } for(i=0; i<p; i++) for(j=0; j<m; j++) { b[i * m + j] = (FP) rand() / (FP) RAND_MAX; // b[i * n + j] = (FP) i+j; // may be helpful for debugging } //printf("A\n"); //print(a, n, p); //printf("B\n"); //print(b, p, m); // ------------- COMPUTATION DONE ON GPU ---------------------------- cudaMalloc((void**)&dev_a, n * p * sizeof(FP)); // allocate memory on device cudaMalloc((void**)&dev_b, p * m * sizeof(FP)); cudaMalloc((void**)&dev_c, n * m * sizeof(FP)); cudaMemcpy(dev_a, a , n * p * sizeof(FP), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b , p * m * sizeof(FP), cudaMemcpyHostToDevice); cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); // cudaEventSynchronize(start); // not needed cudaFuncSetCacheConfig(gpu_matrixmult, cudaFuncCachePreferShared); const int TW = Block_Dim_X; size_t Ns = 2 * TW * TW * sizeof(FP); gpu_matrixmult<<<Grid,Block, Ns>>>(dev_a, dev_b, dev_c, n, p, m, TW); cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); cudaMemcpy(c,dev_c, n * m * sizeof(FP), cudaMemcpyDeviceToHost); printf("Time to calculate results on GPU: %f ms.\n", 
elapsed_time_ms); // exec. time //print(c, n, m); // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) cudaEventRecord(start, 0); // use same timing // cudaEventSynchronize(start); // not needed //cpu_matrixmult(a,b,c, n); // do calculation on host (NOTE: This computes the diff with GPU result.) //kij(a, b, c, n, p, m); // cudaEventRecord(stop, 0); // instrument code to measue end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //print(c, n, m); // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0; i<n*p; i++) { ai = (double) a[i]; suma += ai * ai; } for(i=0; i<p*m; i++) { bi = (double) b[i]; sumb += bi * bi; } for(i=0; i<n*m; i++) { ci = (double) c[i]; sumc += ci * ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(sqrt(n*m)*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); // -------------- clean up --------------------------------------- free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
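// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original program: the scaled-error
// check from the end of main(), written as a standalone helper. It assumes the
// third buffer already holds the difference between CPU and GPU products,
// which is what cpu_matrixmult produces through its "-=" accumulation. The
// tiny matrices in main() are made-up example data.
#include <cstdio>
#include <cmath>

static double scaled_error(const double* a, const double* b, const double* c_diff,
                           int n, int p, int m) {
    double suma = 0.0, sumb = 0.0, sumc = 0.0;
    for (int i = 0; i < n * p; i++) suma += a[i] * a[i];        // ||A||_F^2
    for (int i = 0; i < p * m; i++) sumb += b[i] * b[i];        // ||B||_F^2
    for (int i = 0; i < n * m; i++) sumc += c_diff[i] * c_diff[i];
    return sqrt(sumc) / (sqrt((double)n * m) * sqrt(suma) * sqrt(sumb));
}

int main() {
    // A = B = 2x2 identity; the difference matrix is zero except for one entry
    // perturbed by 1e-15, roughly what double-precision rounding could leave.
    const int n = 2, p = 2, m = 2;
    double a[4] = {1, 0, 0, 1}, b[4] = {1, 0, 0, 1}, c_diff[4] = {1e-15, 0, 0, 0};
    printf("scaled error = %e\n", scaled_error(a, b, c_diff, n, p, m));
    // For FP = double, values around 1e-16..1e-15 indicate the GPU and CPU
    // results agree to machine precision; values near 1e-7 would be more
    // typical of an FP = float build.
    return 0;
}
// ---------------------------------------------------------------------------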
25ec46a2c358e958ab2700b850c271b51917c4e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2022 The Microsoft DeepSpeed Team */ #include "conversion_utils.h" #include "inference_cuda_layers.h" #include "memory_access_utils.h" namespace cg = cooperative_groups; #define MAX_CAP 4 #define MAX_SEQ 2048 inline __device__ float gelu(const float x) { const float sqrt_param = 0.79788456080286535587989211986876f; const float mul_param = 0.044715; return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); } template <typename T> __global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size) { // Input restriction: intermediate_size % vals_per_access == 0 constexpr int granularity = 16; constexpr int values_per_access = granularity / sizeof(T); const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; if (offset < total_count) { T data[values_per_access]; T data_bias[values_per_access]; mem_access::load_global<granularity>(data, input + offset); mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size)); #pragma unroll for (int i = 0; i < values_per_access; i++) { float data_f = conversion::to<float>(data[i]); float bias_f = conversion::to<float>(data_bias[i]); data[i] = conversion::to<T>(gelu(data_f + bias_f)); } mem_access::store_global<granularity>(input + offset, data); } } template <typename T> void launch_bias_gelu(T* input, const T* bias, int intermediate_size, int batch_size, hipStream_t stream) { constexpr int threads = 1024; constexpr int granularity = 16; const int total_count = batch_size * intermediate_size; const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream, input, bias, total_count, intermediate_size); } template void launch_bias_gelu<float>(float*, const float*, int, int, hipStream_t); template void launch_bias_gelu<__half>(__half*, const __half*, int, int, hipStream_t); // Not called directly from DeepSpeed, but used in ds_qkv_gemm_int8, ds_linear_layer, etc. 
__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) { constexpr int granularity = 16; constexpr int vals_per_access = granularity / sizeof(float); const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * vals_per_access; if (offset < total_count) { float data[vals_per_access]; float bias_data[vals_per_access]; mem_access::load_global<granularity>(data, input + offset); mem_access::load_global<granularity>(bias_data, bias + (offset % hidden_size)); #pragma unroll for (int i = 0; i < vals_per_access; i++) { data[i] += bias_data[i]; } mem_access::store_global<granularity>(input + offset, data); } } __global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) { #ifdef HALF_PRECISION_AVAILABLE constexpr int granularity = 16; constexpr int vals_per_access = granularity / sizeof(__half); const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * vals_per_access; if (offset < total_count) { __half2 data[vals_per_access / 2]; __half2 bias_data[vals_per_access / 2]; mem_access::load_global<granularity>(data, input + offset); mem_access::load_global<granularity>(bias_data, bias + (offset % hidden_size)); #pragma unroll for (int i = 0; i < vals_per_access / 2; i++) { float2 data_f = __half22float2(data[i]); float2 bias_f = __half22float2(bias_data[i]); data[i] = __floats2half2_rn(data_f.x + bias_f.x, data_f.y + bias_f.y); } mem_access::store_global<granularity>(input + offset, data); } #endif } template <typename T> void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, hipStream_t stream) { constexpr int threads = 1024; constexpr int granularity = 16; const int total_count = batch_size * hidden_size; const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); hipLaunchKernelGGL(( fused_bias_add), dim3(grid_dims), dim3(block_dims), 0, stream, input, bias, total_count, hidden_size); } template void launch_bias_add<float>(float*, const float*, int, int, hipStream_t); template void launch_bias_add<__half>(__half*, const __half*, int, int, hipStream_t); __global__ void fused_bias_residual(float* residual, const float* hidden_state, const float* attn, const float* bias, const float* attn_bias, const int total_count, const int intermediate_size, const float mp_scale, const bool preln) { float4* res_fl4_ptr = reinterpret_cast<float4*>(residual); const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state); const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn); const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias); const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float4 res_fl4 = res_fl4_ptr[offset]; const float4 hs_fl4 = hs_fl4_ptr[offset]; const float4 attn_fl4 = attn_fl4_ptr[offset]; const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; if (preln) { // residual = (residual + attention + bias + attention_bias) * // mp_scale + hidden_state res_fl4.x = (res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x); res_fl4.y = (res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y); res_fl4.z = (res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z); res_fl4.w = (res_fl4.w + attn_fl4.w + 
bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w); } else { // residual += hidden_state + bias res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x; res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y; res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z; res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w; } res_fl4_ptr[offset] = res_fl4; } } __global__ void fused_bias_residual(__half* residual, const __half* hidden_state, const __half* attn, const __half* bias, const __half* attn_bias, const int total_count, const int intermediate_size, const float mp_scale, const bool preln) { #ifdef HALF_PRECISION_AVAILABLE float2* res_fl2_ptr = reinterpret_cast<float2*>(residual); const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state); const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn); const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias); const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float2 res_fl2 = res_fl2_ptr[offset]; const float2 hs_fl2 = hs_fl2_ptr[offset]; const float2 attn_fl2 = attn_fl2_ptr[offset]; const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2); const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2); const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2); const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2); float2 res_low = __half22float2(res_half2[0]); float2 res_high = __half22float2(res_half2[1]); const float2 hs_low = __half22float2(hs_half2[0]); const float2 hs_high = __half22float2(hs_half2[1]); const float2 attn_low = __half22float2(attn_half2[0]); const float2 attn_high = __half22float2(attn_half2[1]); const float2 bias_low = __half22float2(bias_half2[0]); const float2 bias_high = __half22float2(bias_half2[1]); const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); if (preln) { // residual = (residual + attention + bias + attention_bias) * // mp_scale + hidden_state res_low.x = (res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x; res_low.y = (res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y; res_high.x = (res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x; res_high.y = (res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y; } else { // residual += hidden_state + bias res_low.x = (res_low.x + hs_low.x + bias_low.x); res_low.y = (res_low.y + hs_low.y + bias_low.y); res_high.x = (res_high.x + hs_high.x + bias_high.x); res_high.y = (res_high.y + hs_high.y + bias_high.y); } res_half2[0] = __float22half2_rn(res_low); res_half2[1] = __float22half2_rn(res_high); res_fl2_ptr[offset] = res_fl2; } #endif } template <typename T> void launch_bias_residual(T* residual, T* hidden_state, T* attn, T* bias, T* attn_bias, int batch, int hidden_dim, int mp_size, bool preln, hipStream_t stream) { int total_count = batch * hidden_dim / 4; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); hipLaunchKernelGGL(( fused_bias_residual), dim3(grid_dims), dim3(block_dims), 0, stream, residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / 
mp_size, preln); } template void launch_bias_residual< float>(float*, float*, float*, float*, float*, int, int, int, bool, hipStream_t); template void launch_bias_residual< __half>(__half*, __half*, __half*, __half*, __half*, int, int, int, bool, hipStream_t); __global__ void gptj_residual_add(float* residual, const float* hidden_state, const float* attn, const float* bias, const float* attn_bias, const int total_count, const int intermediate_size, const float mp_scale) { float4* res_fl4_ptr = reinterpret_cast<float4*>(residual); const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state); const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn); const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias); const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float4 res_fl4 = res_fl4_ptr[offset]; const float4 hs_fl4 = hs_fl4_ptr[offset]; const float4 attn_fl4 = attn_fl4_ptr[offset]; const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; if (attn_bias) { float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; // residual += attention_bias res_fl4.x += attn_bias_fl4.x; res_fl4.y += attn_bias_fl4.y; res_fl4.z += attn_bias_fl4.z; res_fl4.w += attn_bias_fl4.w; } // residual = hidden_state + attention + (residual + bias) * mp_scale res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale; res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale; res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale; res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale; res_fl4_ptr[offset] = res_fl4; } } __global__ void gptj_residual_add(__half* residual, const __half* hidden_state, const __half* attn, const __half* bias, const __half* attn_bias, const int total_count, const int intermediate_size, const float mp_scale) { #ifdef HALF_PRECISION_AVAILABLE float2* res_fl2_ptr = reinterpret_cast<float2*>(residual); const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state); const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn); const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias); const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float2 res_fl2 = res_fl2_ptr[offset]; const float2 hs_fl2 = hs_fl2_ptr[offset]; const float2 attn_fl2 = attn_fl2_ptr[offset]; const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2); const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2); const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2); float2 res_low = __half22float2(res_half2[0]); float2 res_high = __half22float2(res_half2[1]); const float2 hs_low = __half22float2(hs_half2[0]); const float2 hs_high = __half22float2(hs_half2[1]); const float2 attn_low = __half22float2(attn_half2[0]); const float2 attn_high = __half22float2(attn_half2[1]); const float2 bias_low = __half22float2(bias_half2[0]); const float2 bias_high = __half22float2(bias_half2[1]); if (attn_bias) { const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2); const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); 
const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); // residual += attention_bias res_low.x += attn_bias_low.x; res_low.y += attn_bias_low.y; res_high.x += attn_bias_high.x; res_high.y += attn_bias_high.y; } // residual = hidden_state + attention + (residual + bias) * mp_scale res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale; res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale; res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale; res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale; res_half2[0] = __float22half2_rn(res_low); res_half2[1] = __float22half2_rn(res_high); res_fl2_ptr[offset] = res_fl2; } #endif } template <typename T> void launch_gptj_residual_add(T* residual, T* hidden_state, T* attn, T* bias, T* attn_bias, int hidden_dim, int batch, int mp_size, hipStream_t stream) { int total_count = batch * hidden_dim / 4; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); hipLaunchKernelGGL(( gptj_residual_add), dim3(grid_dims), dim3(block_dims), 0, stream, residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); } template void launch_gptj_residual_add<float>(float*, float*, float*, float*, float*, int, int, int, hipStream_t); template void launch_gptj_residual_add<__half>(__half*, __half*, __half*, __half*, __half*, int, int, int, hipStream_t); template <typename T> __global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim) { constexpr int granularity = 16; constexpr int vals_per_access = granularity / sizeof(T); T* residual_seq = residual + blockIdx.x * hidden_dim; T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim; for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim; tid += blockDim.x * vals_per_access) { T mlp[vals_per_access]; T res[vals_per_access]; T coef1[vals_per_access]; T coef2[vals_per_access]; mem_access::load_global<granularity>(mlp, mlp_out_seq + tid); mem_access::load_global<granularity>(res, residual_seq + tid); mem_access::load_global<granularity>(coef1, coef + tid); mem_access::load_global<granularity>(coef2, coef + tid + hidden_dim); #pragma unroll for (int idx = 0; idx < vals_per_access; idx++) { mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx]; } mem_access::store_global<granularity>(mlp_out_seq + tid, mlp); } } template <typename T> void launch_moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim, hipStream_t stream) { dim3 grid_dim(seq_len); dim3 block_dim(1024); hipLaunchKernelGGL(( moe_res_matmul), dim3(grid_dim), dim3(block_dim), 0, stream, residual, coef, mlp_out, seq_len, hidden_dim); } template void launch_moe_res_matmul(float* residual, float* coef, float* mlp_out, int seq_len, int hidden_dim, hipStream_t stream); template void launch_moe_res_matmul(__half* residual, __half* coef, __half* mlp_out, int seq_len, int hidden_dim, hipStream_t stream); __global__ void pad_data_kernel(__half* padded_output, __half* output, int head_size, int padded_head_size) { float4* padded_output_cast = reinterpret_cast<float4*>(padded_output); float4* output_cast = reinterpret_cast<float4*>(output); int bid = blockIdx.x * (blockDim.y) + threadIdx.y; int idx = threadIdx.x; padded_output_cast += (bid * padded_head_size); output_cast += (bid * head_size); float4 ZERO; const __half2 zero_h = __float2half2_rn(0.f); __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); #pragma unroll for (int i = 0; i < 4; i++) ZERO_h[i] = 
zero_h; if (idx < head_size) padded_output_cast[idx] = output_cast[idx]; else padded_output_cast[idx] = ZERO; } __global__ void pad_data_kernel(float* padded_output, float* output, int head_size, int padded_head_size) { } template <typename T> void pad_data(T* padded_output, T* output, int bsz, int head_size, int padded_head_size, hipStream_t stream) { dim3 grid_dim((bsz - 1) / 16 + 1); dim3 block_dim(padded_head_size / 8, 16); hipLaunchKernelGGL(( pad_data_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, padded_output, output, head_size / 8, padded_head_size / 8); } template void pad_data(__half* padded_output, __half* output, int bsz, int head_size, int padded_head_size, hipStream_t stream); template void pad_data(float* padded_output, float* output, int bsz, int head_size, int padded_head_size, hipStream_t stream); __global__ void pad_head_seq_kernel(__half* padded_output, __half* output, int seq_len, int padded_seq_len, int head_size, int padded_head_size) { float4* padded_output_cast = reinterpret_cast<float4*>(padded_output); float4* output_cast = reinterpret_cast<float4*>(output); int bsz = blockIdx.x; int bid = blockIdx.y * (blockDim.y) + threadIdx.y; int idx = threadIdx.x; padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size; output_cast += (bsz * seq_len + bid) * head_size; float4 ZERO; const __half2 zero_h = __float2half2_rn(0.f); __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); #pragma unroll for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; if (idx < head_size && bid < seq_len) padded_output_cast[idx] = output_cast[idx]; else padded_output_cast[idx] = ZERO; } __global__ void pad_head_seq_kernel(float* padded_output, float* output, int seq_len, int padded_seq_len, int head_size, int padded_head_size) { } template <typename T> void pad_head_seq(T* padded_output, T* output, int bsz, int seq_len, int padded_seq_len, int head_size, int padded_head_size, hipStream_t stream) { dim3 grid_dim(bsz, padded_seq_len / 16); dim3 block_dim(padded_head_size / 8, 16); hipLaunchKernelGGL(( pad_head_seq_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8); } template void pad_head_seq(__half* padded_output, __half* output, int bsz, int seq_len, int padded_seq_len, int head_size, int padded_head_size, hipStream_t stream); template void pad_head_seq(float* padded_output, float* output, int bsz, int seq_len, int padded_seq_len, int head_size, int padded_head_size, hipStream_t stream);
25ec46a2c358e958ab2700b850c271b51917c4e6.cu
/* Copyright 2022 The Microsoft DeepSpeed Team */ #include "conversion_utils.h" #include "inference_cuda_layers.h" #include "memory_access_utils.h" namespace cg = cooperative_groups; #define MAX_CAP 4 #define MAX_SEQ 2048 inline __device__ float gelu(const float x) { const float sqrt_param = 0.79788456080286535587989211986876f; const float mul_param = 0.044715; return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); } template <typename T> __global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size) { // Input restriction: intermediate_size % vals_per_access == 0 constexpr int granularity = 16; constexpr int values_per_access = granularity / sizeof(T); const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; if (offset < total_count) { T data[values_per_access]; T data_bias[values_per_access]; mem_access::load_global<granularity>(data, input + offset); mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size)); #pragma unroll for (int i = 0; i < values_per_access; i++) { float data_f = conversion::to<float>(data[i]); float bias_f = conversion::to<float>(data_bias[i]); data[i] = conversion::to<T>(gelu(data_f + bias_f)); } mem_access::store_global<granularity>(input + offset, data); } } template <typename T> void launch_bias_gelu(T* input, const T* bias, int intermediate_size, int batch_size, cudaStream_t stream) { constexpr int threads = 1024; constexpr int granularity = 16; const int total_count = batch_size * intermediate_size; const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); fused_bias_gelu<<<grid_dims, block_dims, 0, stream>>>( input, bias, total_count, intermediate_size); } template void launch_bias_gelu<float>(float*, const float*, int, int, cudaStream_t); template void launch_bias_gelu<__half>(__half*, const __half*, int, int, cudaStream_t); // Not called directly from DeepSpeed, but used in ds_qkv_gemm_int8, ds_linear_layer, etc. 
__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) { constexpr int granularity = 16; constexpr int vals_per_access = granularity / sizeof(float); const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * vals_per_access; if (offset < total_count) { float data[vals_per_access]; float bias_data[vals_per_access]; mem_access::load_global<granularity>(data, input + offset); mem_access::load_global<granularity>(bias_data, bias + (offset % hidden_size)); #pragma unroll for (int i = 0; i < vals_per_access; i++) { data[i] += bias_data[i]; } mem_access::store_global<granularity>(input + offset, data); } } __global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) { #ifdef HALF_PRECISION_AVAILABLE constexpr int granularity = 16; constexpr int vals_per_access = granularity / sizeof(__half); const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * vals_per_access; if (offset < total_count) { __half2 data[vals_per_access / 2]; __half2 bias_data[vals_per_access / 2]; mem_access::load_global<granularity>(data, input + offset); mem_access::load_global<granularity>(bias_data, bias + (offset % hidden_size)); #pragma unroll for (int i = 0; i < vals_per_access / 2; i++) { float2 data_f = __half22float2(data[i]); float2 bias_f = __half22float2(bias_data[i]); data[i] = __floats2half2_rn(data_f.x + bias_f.x, data_f.y + bias_f.y); } mem_access::store_global<granularity>(input + offset, data); } #endif } template <typename T> void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream) { constexpr int threads = 1024; constexpr int granularity = 16; const int total_count = batch_size * hidden_size; const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); fused_bias_add<<<grid_dims, block_dims, 0, stream>>>(input, bias, total_count, hidden_size); } template void launch_bias_add<float>(float*, const float*, int, int, cudaStream_t); template void launch_bias_add<__half>(__half*, const __half*, int, int, cudaStream_t); __global__ void fused_bias_residual(float* residual, const float* hidden_state, const float* attn, const float* bias, const float* attn_bias, const int total_count, const int intermediate_size, const float mp_scale, const bool preln) { float4* res_fl4_ptr = reinterpret_cast<float4*>(residual); const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state); const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn); const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias); const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float4 res_fl4 = res_fl4_ptr[offset]; const float4 hs_fl4 = hs_fl4_ptr[offset]; const float4 attn_fl4 = attn_fl4_ptr[offset]; const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; if (preln) { // residual = (residual + attention + bias + attention_bias) * // mp_scale + hidden_state res_fl4.x = (res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x); res_fl4.y = (res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y); res_fl4.z = (res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z); res_fl4.w = (res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * 
mp_scale + (hs_fl4.w); } else { // residual += hidden_state + bias res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x; res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y; res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z; res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w; } res_fl4_ptr[offset] = res_fl4; } } __global__ void fused_bias_residual(__half* residual, const __half* hidden_state, const __half* attn, const __half* bias, const __half* attn_bias, const int total_count, const int intermediate_size, const float mp_scale, const bool preln) { #ifdef HALF_PRECISION_AVAILABLE float2* res_fl2_ptr = reinterpret_cast<float2*>(residual); const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state); const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn); const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias); const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float2 res_fl2 = res_fl2_ptr[offset]; const float2 hs_fl2 = hs_fl2_ptr[offset]; const float2 attn_fl2 = attn_fl2_ptr[offset]; const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2); const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2); const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2); const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2); float2 res_low = __half22float2(res_half2[0]); float2 res_high = __half22float2(res_half2[1]); const float2 hs_low = __half22float2(hs_half2[0]); const float2 hs_high = __half22float2(hs_half2[1]); const float2 attn_low = __half22float2(attn_half2[0]); const float2 attn_high = __half22float2(attn_half2[1]); const float2 bias_low = __half22float2(bias_half2[0]); const float2 bias_high = __half22float2(bias_half2[1]); const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); if (preln) { // residual = (residual + attention + bias + attention_bias) * // mp_scale + hidden_state res_low.x = (res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x; res_low.y = (res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y; res_high.x = (res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x; res_high.y = (res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y; } else { // residual += hidden_state + bias res_low.x = (res_low.x + hs_low.x + bias_low.x); res_low.y = (res_low.y + hs_low.y + bias_low.y); res_high.x = (res_high.x + hs_high.x + bias_high.x); res_high.y = (res_high.y + hs_high.y + bias_high.y); } res_half2[0] = __float22half2_rn(res_low); res_half2[1] = __float22half2_rn(res_high); res_fl2_ptr[offset] = res_fl2; } #endif } template <typename T> void launch_bias_residual(T* residual, T* hidden_state, T* attn, T* bias, T* attn_bias, int batch, int hidden_dim, int mp_size, bool preln, cudaStream_t stream) { int total_count = batch * hidden_dim / 4; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); fused_bias_residual<<<grid_dims, block_dims, 0, stream>>>(residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size, preln); } template void launch_bias_residual< float>(float*, 
float*, float*, float*, float*, int, int, int, bool, cudaStream_t); template void launch_bias_residual< __half>(__half*, __half*, __half*, __half*, __half*, int, int, int, bool, cudaStream_t); __global__ void gptj_residual_add(float* residual, const float* hidden_state, const float* attn, const float* bias, const float* attn_bias, const int total_count, const int intermediate_size, const float mp_scale) { float4* res_fl4_ptr = reinterpret_cast<float4*>(residual); const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state); const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn); const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias); const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float4 res_fl4 = res_fl4_ptr[offset]; const float4 hs_fl4 = hs_fl4_ptr[offset]; const float4 attn_fl4 = attn_fl4_ptr[offset]; const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; if (attn_bias) { float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; // residual += attention_bias res_fl4.x += attn_bias_fl4.x; res_fl4.y += attn_bias_fl4.y; res_fl4.z += attn_bias_fl4.z; res_fl4.w += attn_bias_fl4.w; } // residual = hidden_state + attention + (residual + bias) * mp_scale res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale; res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale; res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale; res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale; res_fl4_ptr[offset] = res_fl4; } } __global__ void gptj_residual_add(__half* residual, const __half* hidden_state, const __half* attn, const __half* bias, const __half* attn_bias, const int total_count, const int intermediate_size, const float mp_scale) { #ifdef HALF_PRECISION_AVAILABLE float2* res_fl2_ptr = reinterpret_cast<float2*>(residual); const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state); const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn); const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias); const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias); const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { float2 res_fl2 = res_fl2_ptr[offset]; const float2 hs_fl2 = hs_fl2_ptr[offset]; const float2 attn_fl2 = attn_fl2_ptr[offset]; const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2); const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2); const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2); float2 res_low = __half22float2(res_half2[0]); float2 res_high = __half22float2(res_half2[1]); const float2 hs_low = __half22float2(hs_half2[0]); const float2 hs_high = __half22float2(hs_half2[1]); const float2 attn_low = __half22float2(attn_half2[0]); const float2 attn_high = __half22float2(attn_half2[1]); const float2 bias_low = __half22float2(bias_half2[0]); const float2 bias_high = __half22float2(bias_half2[1]); if (attn_bias) { const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2); const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); // 
residual += attention_bias res_low.x += attn_bias_low.x; res_low.y += attn_bias_low.y; res_high.x += attn_bias_high.x; res_high.y += attn_bias_high.y; } // residual = hidden_state + attention + (residual + bias) * mp_scale res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale; res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale; res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale; res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale; res_half2[0] = __float22half2_rn(res_low); res_half2[1] = __float22half2_rn(res_high); res_fl2_ptr[offset] = res_fl2; } #endif } template <typename T> void launch_gptj_residual_add(T* residual, T* hidden_state, T* attn, T* bias, T* attn_bias, int hidden_dim, int batch, int mp_size, cudaStream_t stream) { int total_count = batch * hidden_dim / 4; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); gptj_residual_add<<<grid_dims, block_dims, 0, stream>>>( residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); } template void launch_gptj_residual_add<float>(float*, float*, float*, float*, float*, int, int, int, cudaStream_t); template void launch_gptj_residual_add<__half>(__half*, __half*, __half*, __half*, __half*, int, int, int, cudaStream_t); template <typename T> __global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim) { constexpr int granularity = 16; constexpr int vals_per_access = granularity / sizeof(T); T* residual_seq = residual + blockIdx.x * hidden_dim; T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim; for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim; tid += blockDim.x * vals_per_access) { T mlp[vals_per_access]; T res[vals_per_access]; T coef1[vals_per_access]; T coef2[vals_per_access]; mem_access::load_global<granularity>(mlp, mlp_out_seq + tid); mem_access::load_global<granularity>(res, residual_seq + tid); mem_access::load_global<granularity>(coef1, coef + tid); mem_access::load_global<granularity>(coef2, coef + tid + hidden_dim); #pragma unroll for (int idx = 0; idx < vals_per_access; idx++) { mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx]; } mem_access::store_global<granularity>(mlp_out_seq + tid, mlp); } } template <typename T> void launch_moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim, cudaStream_t stream) { dim3 grid_dim(seq_len); dim3 block_dim(1024); moe_res_matmul<<<grid_dim, block_dim, 0, stream>>>( residual, coef, mlp_out, seq_len, hidden_dim); } template void launch_moe_res_matmul(float* residual, float* coef, float* mlp_out, int seq_len, int hidden_dim, cudaStream_t stream); template void launch_moe_res_matmul(__half* residual, __half* coef, __half* mlp_out, int seq_len, int hidden_dim, cudaStream_t stream); __global__ void pad_data_kernel(__half* padded_output, __half* output, int head_size, int padded_head_size) { float4* padded_output_cast = reinterpret_cast<float4*>(padded_output); float4* output_cast = reinterpret_cast<float4*>(output); int bid = blockIdx.x * (blockDim.y) + threadIdx.y; int idx = threadIdx.x; padded_output_cast += (bid * padded_head_size); output_cast += (bid * head_size); float4 ZERO; const __half2 zero_h = __float2half2_rn(0.f); __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); #pragma unroll for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; if (idx < head_size) padded_output_cast[idx] = output_cast[idx]; else padded_output_cast[idx] = ZERO; } __global__ void 
pad_data_kernel(float* padded_output, float* output, int head_size, int padded_head_size) { } template <typename T> void pad_data(T* padded_output, T* output, int bsz, int head_size, int padded_head_size, cudaStream_t stream) { dim3 grid_dim((bsz - 1) / 16 + 1); dim3 block_dim(padded_head_size / 8, 16); pad_data_kernel<<<grid_dim, block_dim, 0, stream>>>( padded_output, output, head_size / 8, padded_head_size / 8); } template void pad_data(__half* padded_output, __half* output, int bsz, int head_size, int padded_head_size, cudaStream_t stream); template void pad_data(float* padded_output, float* output, int bsz, int head_size, int padded_head_size, cudaStream_t stream); __global__ void pad_head_seq_kernel(__half* padded_output, __half* output, int seq_len, int padded_seq_len, int head_size, int padded_head_size) { float4* padded_output_cast = reinterpret_cast<float4*>(padded_output); float4* output_cast = reinterpret_cast<float4*>(output); int bsz = blockIdx.x; int bid = blockIdx.y * (blockDim.y) + threadIdx.y; int idx = threadIdx.x; padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size; output_cast += (bsz * seq_len + bid) * head_size; float4 ZERO; const __half2 zero_h = __float2half2_rn(0.f); __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); #pragma unroll for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; if (idx < head_size && bid < seq_len) padded_output_cast[idx] = output_cast[idx]; else padded_output_cast[idx] = ZERO; } __global__ void pad_head_seq_kernel(float* padded_output, float* output, int seq_len, int padded_seq_len, int head_size, int padded_head_size) { } template <typename T> void pad_head_seq(T* padded_output, T* output, int bsz, int seq_len, int padded_seq_len, int head_size, int padded_head_size, cudaStream_t stream) { dim3 grid_dim(bsz, padded_seq_len / 16); dim3 block_dim(padded_head_size / 8, 16); pad_head_seq_kernel<<<grid_dim, block_dim, 0, stream>>>( padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8); } template void pad_head_seq(__half* padded_output, __half* output, int bsz, int seq_len, int padded_seq_len, int head_size, int padded_head_size, cudaStream_t stream); template void pad_head_seq(float* padded_output, float* output, int bsz, int seq_len, int padded_seq_len, int head_size, int padded_head_size, cudaStream_t stream);
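For orientation, here is a minimal host-side sketch of how the float instantiation of launch_bias_gelu from the CUDA file above could be exercised. The buffer sizes, fill values, and the helper name demo_bias_gelu are illustrative assumptions, not part of the DeepSpeed sources, and the sketch presumes it is compiled and linked together with the translation unit above (which provides the explicit float instantiation).

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Assumption: linked against the file above, which defines and explicitly
// instantiates launch_bias_gelu<float>.
template <typename T>
void launch_bias_gelu(T* input, const T* bias, int intermediate_size, int batch_size, cudaStream_t stream);

int demo_bias_gelu()
{
    const int batch_size = 8;           // assumed number of rows (tokens)
    const int intermediate_size = 4096; // assumed FFN width; must be a multiple of 4 floats (16B accesses)
    const size_t n = (size_t)batch_size * intermediate_size;

    std::vector<float> h_input(n, 0.5f);
    std::vector<float> h_bias(intermediate_size, 0.1f);

    float* d_input = NULL;
    float* d_bias = NULL;
    cudaMalloc((void**)&d_input, n * sizeof(float));
    cudaMalloc((void**)&d_bias, intermediate_size * sizeof(float));
    cudaMemcpy(d_input, h_input.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_bias, h_bias.data(), intermediate_size * sizeof(float), cudaMemcpyHostToDevice);

    // In-place fused bias + GELU over the whole (batch_size, intermediate_size)
    // buffer, issued on the default stream.
    launch_bias_gelu<float>(d_input, d_bias, intermediate_size, batch_size, 0);

    cudaMemcpy(h_input.data(), d_input, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("first activation after bias+GELU: %f\n", h_input[0]);

    cudaFree(d_input);
    cudaFree(d_bias);
    return 0;
}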
47ecd17fdefb04395d323905b58260fadf01d3a8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    Author:     Ross Bearden
    Instructor: Dr. Pettey
    Class:      CSCI 4330
    Date:       05/03/17
    Purpose:    This program will calculate the sum of two matrices using GPUs
                and the cuda language to do the calculations
*/
#include <stdio.h>
#include <stdlib.h>

__global__ void vectorAdd(int* vector1, int* vector2, int* vectorResult)
{
    vectorResult[threadIdx.x] = vector1[threadIdx.x] + vector2[threadIdx.x];
}

int main(int argc, char* argv[])
{
    int rows = 16;
    int columns = 32;
    int i, j;
    int firstArray[rows][columns];
    int secondArray[rows][columns];

    //Read the matrices
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            scanf("%d", &firstArray[i][j]);
        }
    }
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            scanf("%d", &secondArray[i][j]);
        }
    }
    /*
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            printf("%d", firstArray[i][j]);
        }
    }
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            printf("%d", secondArray[i][j]);
        }
    }
    */
    int* vector1;
    int* vector2;
    int sizeOfVector = 512;
    int* deviceResults;
    int hostResults[512];

    hipMalloc((void**) &vector1, sizeof(int) * sizeOfVector);
    hipMalloc((void**) &vector2, sizeof(int) * sizeOfVector);
    hipMalloc((void**) &deviceResults, sizeof(int) * sizeOfVector);

    hipMemcpy(vector1, firstArray, sizeof(int) * sizeOfVector, hipMemcpyHostToDevice);
    hipMemcpy(vector2, secondArray, sizeof(int) * sizeOfVector, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( vectorAdd), dim3(1), dim3(sizeOfVector), 0, 0, vector1, vector2, deviceResults);

    hipMemcpy(hostResults, deviceResults, sizeof(int) * sizeOfVector, hipMemcpyDeviceToHost);

    //Print the result matrix (index the flattened row-major buffer, not just the first row)
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            printf("%i ", hostResults[i * columns + j]);
        }
        printf("\n");
    }

    hipFree(vector1);
    hipFree(vector2);
    hipFree(deviceResults);

    return 0;
}
47ecd17fdefb04395d323905b58260fadf01d3a8.cu
/*
    Author:     Ross Bearden
    Instructor: Dr. Pettey
    Class:      CSCI 4330
    Date:       05/03/17
    Purpose:    This program will calculate the sum of two matrices using GPUs
                and the cuda language to do the calculations
*/
#include <stdio.h>
#include <stdlib.h>

__global__ void vectorAdd(int* vector1, int* vector2, int* vectorResult)
{
    vectorResult[threadIdx.x] = vector1[threadIdx.x] + vector2[threadIdx.x];
}

int main(int argc, char* argv[])
{
    int rows = 16;
    int columns = 32;
    int i, j;
    int firstArray[rows][columns];
    int secondArray[rows][columns];

    //Read the matrices
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            scanf("%d", &firstArray[i][j]);
        }
    }
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            scanf("%d", &secondArray[i][j]);
        }
    }
    /*
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            printf("%d", firstArray[i][j]);
        }
    }
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            printf("%d", secondArray[i][j]);
        }
    }
    */
    int* vector1;
    int* vector2;
    int sizeOfVector = 512;
    int* deviceResults;
    int hostResults[512];

    cudaMalloc((void**) &vector1, sizeof(int) * sizeOfVector);
    cudaMalloc((void**) &vector2, sizeof(int) * sizeOfVector);
    cudaMalloc((void**) &deviceResults, sizeof(int) * sizeOfVector);

    cudaMemcpy(vector1, firstArray, sizeof(int) * sizeOfVector, cudaMemcpyHostToDevice);
    cudaMemcpy(vector2, secondArray, sizeof(int) * sizeOfVector, cudaMemcpyHostToDevice);

    vectorAdd<<<1, sizeOfVector>>> (vector1, vector2, deviceResults);

    cudaMemcpy(hostResults, deviceResults, sizeof(int) * sizeOfVector, cudaMemcpyDeviceToHost);

    //Print the result matrix (index the flattened row-major buffer, not just the first row)
    for(i = 0; i < rows; i++){
        for(j = 0; j < columns; j++){
            printf("%i ", hostResults[i * columns + j]);
        }
        printf("\n");
    }

    cudaFree(vector1);
    cudaFree(vector2);
    cudaFree(deviceResults);

    return 0;
}
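A side note on the launch configuration in the pair above: vectorAdd is launched as a single block of sizeOfVector threads, so it only covers 512 elements, which happens to equal rows * columns here. As a hedged sketch, not part of the original assignment, a bounds-checked multi-block formulation would remove that limit; the names vectorAddGrid and totalElements are illustrative only.

// Sketch only: an element-wise add that is not limited to one block.
__global__ void vectorAddGrid(const int* a, const int* b, int* out, int totalElements)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < totalElements)              // guard against the final partial block
        out[idx] = a[idx] + b[idx];
}

// Possible launch for n = rows * columns elements, reusing the buffers above:
//   int threads = 256;
//   int blocks  = (n + threads - 1) / threads;
//   vectorAddGrid<<<blocks, threads>>>(vector1, vector2, deviceResults, n);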
46930d4ff6b9ae99fd613cb3144117356b175dbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../cuda_utils.h" #include "interpolation_cuda_kernel.h" __global__ void interpolation_forward_cuda_kernel(int n, int c, int k, const float *input, const int *idx, const float *weight, float *output) { // input: input: (m, c), idx: (n, k), weight: (n, k), output: output (n, c) int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n * c) return; int c_idx = index % c; int n_idx = index / c; for (int i = 0; i < k; i++) { int idx_idx = n_idx * k + i; int input_idx = idx[idx_idx] * c + c_idx; output[index] += input[input_idx] * weight[idx_idx]; } } __global__ void interpolation_backward_cuda_kernel(int n, int c, int k, const float *grad_output, const int *idx, const float *weight, float *grad_input) { // input: grad_output: (n, c), idx: (n, k), weight: (n, k), output: grad_input (m, c) int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n * c) return; int c_idx = index % c; int n_idx = index / c; for (int i = 0; i < k; i++) { int idx_idx = n_idx * k + i; int input_idx = idx[idx_idx] * c + c_idx; atomicAdd(grad_input + input_idx, grad_output[index] * weight[idx_idx]); } } void interpolation_forward_cuda_launcher(int n, int c, int k, const float *input, const int *idx, const float *weight, float *output) { // input: input: (m, c), idx: (n, k), weight: (n, k), output: output (n, c) dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( interpolation_forward_cuda_kernel), dim3(blocks), dim3(threads), 0, 0, n, c, k, input, idx, weight, output); } void interpolation_backward_cuda_launcher(int n, int c, int k, const float *grad_output, const int *idx, const float *weight, float *grad_input) { // input: grad_output: (n, c), idx: (n, k), weight: (n, k), output: grad_input (m, c) dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( interpolation_backward_cuda_kernel), dim3(blocks), dim3(threads), 0, 0, n, c, k, grad_output, idx, weight, grad_input); }
46930d4ff6b9ae99fd613cb3144117356b175dbf.cu
#include "../cuda_utils.h" #include "interpolation_cuda_kernel.h" __global__ void interpolation_forward_cuda_kernel(int n, int c, int k, const float *input, const int *idx, const float *weight, float *output) { // input: input: (m, c), idx: (n, k), weight: (n, k), output: output (n, c) int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n * c) return; int c_idx = index % c; int n_idx = index / c; for (int i = 0; i < k; i++) { int idx_idx = n_idx * k + i; int input_idx = idx[idx_idx] * c + c_idx; output[index] += input[input_idx] * weight[idx_idx]; } } __global__ void interpolation_backward_cuda_kernel(int n, int c, int k, const float *grad_output, const int *idx, const float *weight, float *grad_input) { // input: grad_output: (n, c), idx: (n, k), weight: (n, k), output: grad_input (m, c) int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n * c) return; int c_idx = index % c; int n_idx = index / c; for (int i = 0; i < k; i++) { int idx_idx = n_idx * k + i; int input_idx = idx[idx_idx] * c + c_idx; atomicAdd(grad_input + input_idx, grad_output[index] * weight[idx_idx]); } } void interpolation_forward_cuda_launcher(int n, int c, int k, const float *input, const int *idx, const float *weight, float *output) { // input: input: (m, c), idx: (n, k), weight: (n, k), output: output (n, c) dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); dim3 threads(THREADS_PER_BLOCK); interpolation_forward_cuda_kernel<<<blocks, threads, 0>>>(n, c, k, input, idx, weight, output); } void interpolation_backward_cuda_launcher(int n, int c, int k, const float *grad_output, const int *idx, const float *weight, float *grad_input) { // input: grad_output: (n, c), idx: (n, k), weight: (n, k), output: grad_input (m, c) dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); dim3 threads(THREADS_PER_BLOCK); interpolation_backward_cuda_kernel<<<blocks, threads, 0>>>(n, c, k, grad_output, idx, weight, grad_input); }
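To make the flat indexing in interpolation_forward_cuda_kernel easier to follow, here is a CPU reference of the same weighted gather. The function name interpolation_forward_cpu and the use of std::vector are illustrative assumptions; only the index arithmetic mirrors the kernel above.

#include <vector>

// CPU reference for the forward pass:
//   output(n_idx, c_idx) += sum_i input(idx(n_idx, i), c_idx) * weight(n_idx, i)
// using the kernel's row-major layout: element (row, col) of an (R, C) array lives at row * C + col.
void interpolation_forward_cpu(int n, int c, int k,
                               const std::vector<float>& input,   // (m, c); m is implied by the values in idx
                               const std::vector<int>& idx,       // (n, k)
                               const std::vector<float>& weight,  // (n, k)
                               std::vector<float>& output)        // (n, c), expected to be zero-initialized
{
    for (int n_idx = 0; n_idx < n; ++n_idx) {
        for (int c_idx = 0; c_idx < c; ++c_idx) {
            for (int i = 0; i < k; ++i) {
                int idx_idx = n_idx * k + i;
                int input_idx = idx[idx_idx] * c + c_idx;
                output[n_idx * c + c_idx] += input[input_idx] * weight[idx_idx];
            }
        }
    }
}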
ec80120a2ae661fd2a6280e39aba09b249891041.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void rmspropKernel ( int numberIterations, int* parameterIndices, int* counts, int dimension, float* parameters, float* gradient, float learningRate, float decay, float oneMinusDecay, float epsilon, float* accumulation) { int updateIndex = blockIdx.x; int parameterIndex = parameterIndices[updateIndex]; int count = counts[updateIndex]; if(parameterIndex != -1 && count > 0) { float scalingFactor = 1.0f / (float)count; int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations; int firstParameterEntryIndex = parameterIndex * dimension; int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex; int startGradientEntryIndex = updateIndex * dimension + startEntryIndex; int exclusiveEndParameterEntryIndex = min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension); int parameterEntryIndex = startParameterEntryIndex; int gradientEntryIndex = startGradientEntryIndex; while(parameterEntryIndex < exclusiveEndParameterEntryIndex) { float scaledDerivative = scalingFactor * gradient[gradientEntryIndex]; float updatedAccumulation = decay * accumulation[parameterEntryIndex] + oneMinusDecay * (scaledDerivative * scaledDerivative); accumulation[parameterEntryIndex] = updatedAccumulation; float adaptiveLearningRate = learningRate / sqrtf(updatedAccumulation + epsilon); float update = -adaptiveLearningRate * scaledDerivative; parameters[parameterEntryIndex] += update; parameterEntryIndex++; gradientEntryIndex++; } } }
ec80120a2ae661fd2a6280e39aba09b249891041.cu
#include "includes.h" __global__ void rmspropKernel ( int numberIterations, int* parameterIndices, int* counts, int dimension, float* parameters, float* gradient, float learningRate, float decay, float oneMinusDecay, float epsilon, float* accumulation) { int updateIndex = blockIdx.x; int parameterIndex = parameterIndices[updateIndex]; int count = counts[updateIndex]; if(parameterIndex != -1 && count > 0) { float scalingFactor = 1.0f / (float)count; int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations; int firstParameterEntryIndex = parameterIndex * dimension; int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex; int startGradientEntryIndex = updateIndex * dimension + startEntryIndex; int exclusiveEndParameterEntryIndex = min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension); int parameterEntryIndex = startParameterEntryIndex; int gradientEntryIndex = startGradientEntryIndex; while(parameterEntryIndex < exclusiveEndParameterEntryIndex) { float scaledDerivative = scalingFactor * gradient[gradientEntryIndex]; float updatedAccumulation = decay * accumulation[parameterEntryIndex] + oneMinusDecay * (scaledDerivative * scaledDerivative); accumulation[parameterEntryIndex] = updatedAccumulation; float adaptiveLearningRate = learningRate / sqrtf(updatedAccumulation + epsilon); float update = -adaptiveLearningRate * scaledDerivative; parameters[parameterEntryIndex] += update; parameterEntryIndex++; gradientEntryIndex++; } } }
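As a plain-arithmetic restatement of what rmspropKernel does for one parameter entry, a scalar sketch follows. The helper name rmsprop_step_scalar is an assumption for illustration, and g stands for the already count-scaled gradient (scalingFactor * gradient in the kernel); the (1 - decay) factor corresponds to the kernel's oneMinusDecay argument.

#include <cmath>

// Per-entry RMSprop step, mirroring the kernel body.
inline void rmsprop_step_scalar(float& parameter, float& accumulation, float g,
                                float learningRate, float decay, float epsilon)
{
    accumulation = decay * accumulation + (1.0f - decay) * g * g;     // running mean of g^2
    parameter   -= learningRate / std::sqrt(accumulation + epsilon) * g;  // adaptive step
}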
428cebcd017e45a48795fb3acb0973e32fe2d622.hip
// !!! This is a file automatically generated by hipify!!! // // cudaGMRES.cu // Cuda GMRES // // Created by Tim Ioannidis on 2/18/12. // Copyright 2012 Chemeng NTUA. All rights reserved. // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #include "structs.h" #include "parameters.h" #include "extern.h" #include "cuda_config.h" #include "cuda_methods.h" #include "cuda_dot.cu" #include "cuda_initial2.cu" #include "cuda_matvec.cu" #include "cuda_matvecn.cu" #include "cuda_vec_update.cu" #include "hip_reduction.hip" #include "cuda_norm.cu" #include "cuda_vec_replace.cu" #include "cuda_vec_replace2.cu" #include "cuda_matvec_up.cu" #include "cuda_leastsq.cu" /////////////////////////////////////////////////////// ////// SOS!!!!! O DISDIASTATOS /////// ////// PINAKAS u_base EINAI KATA STILI /////// ////// E.G. A[i][j]=A[j*ROWS+i] /////// /////////////////////////////////////////////////////// extern "C" { //lunei to provlima A*x=r1 kai gemizei to d=x void cuda_GMRES( double *d, double *r1, struct common4 *sparse) { clock_t start1, end1 ; float cuda_GMRES_time=0; int blocksPerGrid; blocksPerGrid=((N+threadsPerBlock-1)/threadsPerBlock); if (blocksPerGrid > 65530) { printf("WARNING,block number exceeded hardware limit"); blocksPerGrid=65530; } //streams and devices hipStream_t stream0; hipSetDevice(0); hipStreamCreate( &stream0 ); //de metraw ta apo panw sto xrono giati einai to initialization tis kartas printf("ThreadsPerBlock=%d\n",threadsPerBlock); printf("\nCuda GMRES started computation\n"); start1 = clock(); //variables declaration int iter=0,i=0,j=0; double *dev0_AA,*dev0_r1,*dev0_help; int *dev0_JA,*dev0_IA; double *dev0_x,*dev0_r0,*dev0_w,*dev0_res,*dev0_vita,*dev0_Wm; double *dev0_Hm,*dev0_u_base,*dev0_e,*dev0_y,*dev0_g; //allocation sto device arrays me dedomena hipMalloc((void**)&dev0_r1,(N)*sizeof(double)); hipMalloc((void**)&dev0_AA,(Nz)*sizeof(double)); hipMalloc((void**)&dev0_JA,(Nz)*sizeof(int)); hipMalloc((void**)&dev0_IA,(N+1)*sizeof(int)); //allocation sto device voithikwn arrays hipMalloc((void**)&dev0_x,(N)*sizeof(double)); hipMalloc((void**)&dev0_r0,(N)*sizeof(double)); hipMalloc((void**)&dev0_Hm,((m+1)*m)*sizeof(double)); hipMalloc((void**)&dev0_u_base,(N*m)*sizeof(double)); hipMalloc((void**)&dev0_e,(m+1)*sizeof(double)); hipMalloc((void**)&dev0_y,(m)*sizeof(double)); hipMalloc((void**)&dev0_g,(m+1)*sizeof(double)); hipMalloc((void**)&dev0_w,(N)*sizeof(double)); hipMalloc((void**)&dev0_res,(blocksPerGrid)*sizeof(double)); hipMalloc((void**)&dev0_vita,sizeof(double)); hipMalloc((void**)&dev0_Wm,((m+1)*(m+1))*sizeof(double)); hipMalloc((void**)&dev0_help,sizeof(double)); //perasma dedomenwn stin global device memory hipMemcpy(dev0_AA, sparse->AA, Nz*sizeof(double), hipMemcpyDefault ); hipMemcpy(dev0_JA, sparse->JA, Nz*sizeof(int), hipMemcpyDefault ); hipMemcpy(dev0_IA, sparse->IA, (N+1)*sizeof(int), hipMemcpyDefault ); hipMemcpy(dev0_r1, r1, N*sizeof(double), hipMemcpyDefault ); ///////////////////////////////////////////////////////////////////////////////// //ksekinima epanaliptikis iter=1; hipLaunchKernelGGL(( cuda_initial2_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, (m+1),dev0_e,N, dev0_x); while (iter<=GMRES_iter) { //upologismos r0=b-A*x opou b=r1 me MATMUL se CSR format kai NORM r0 hipLaunchKernelGGL(( cuda_matvecn_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, dev0_r0, dev0_AA, dev0_JA, dev0_IA, dev0_x, dev0_r1,dev0_res); hipLaunchKernelGGL(( cuda_reduction_kernel), 
dim3(1),dim3(threadsPerBlock),0,stream0, blocksPerGrid, dev0_res,dev0_vita,1); //upologismos uj[]=r0[]/vita kai apothikeusi ston u_base KATA STILI orismenos //tautoxrona g=vita*e hipLaunchKernelGGL(( cuda_vec_replace_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, dev0_u_base, dev0_vita, dev0_r0, m+1, dev0_g, dev0_e); //KATASKEUI UPOXWROU Krylov for (j=0; j<m; j++) { //j einai to count g mas if (j >= 1) { //u_base[][j+1]=w[]/Hm[j+1][j] hipLaunchKernelGGL(( cuda_vec_replace2_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, &dev0_u_base[(j)*N], &dev0_Hm[(j)*m + j-1], dev0_w); } //uj[i]=u_base[i][j] //matmul me CSR w=matvec(A,uj) hipLaunchKernelGGL(( cuda_matvec_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, dev0_w, dev0_AA, dev0_JA, dev0_IA,&dev0_u_base[j*N]); for (i=0; i<=j; i++) { //uj[k]=u_base[k][i] //DOT PRODUCT w*uj-> kai eisagwgi sto Hm[i][j] hipLaunchKernelGGL(( cuda_dot_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N,dev0_w, &dev0_u_base[i*N],dev0_res); hipLaunchKernelGGL(( cuda_reduction_kernel), dim3(1),dim3(threadsPerBlock),0,stream0, blocksPerGrid, dev0_res,&dev0_Hm[i*m + j],2); //w=w-Hm(i,j)*uj hipLaunchKernelGGL(( cuda_vec_update_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, dev0_w, &dev0_Hm[i*m + j], &dev0_u_base[i*N] ); } hipLaunchKernelGGL(( cuda_norm_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, dev0_w,dev0_res); hipLaunchKernelGGL(( cuda_reduction_kernel), dim3(1),dim3(threadsPerBlock),0,stream0, blocksPerGrid, dev0_res,&dev0_Hm[(j+1)*m + j],1); /* if (j<(m-1)) { //u_base[][j+1]=w[]/Hm[j+1][j] hipLaunchKernelGGL(( cuda_vec_replace2_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, &dev0_u_base[(j+1)*N], &dev0_Hm[(j+1)*m + j], dev0_w); }*/ } //Least Squares problem if (threadsPerBlock<m) { printf("ERROR, threadsPerBlock should be greater than m"); } hipLaunchKernelGGL(( cuda_leastsq_kernel), dim3(1),dim3(threadsPerBlock),0,stream0, m,dev0_Hm,dev0_g,dev0_y,dev0_Wm); //TELOS Least Squares problem //upologismos x = x0 + matvec(u_base(N,m),y(m)) hipLaunchKernelGGL(( cuda_matvec_up_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock),0,stream0, N, m, dev0_x, dev0_u_base, dev0_y); iter++; } //Copy result back to CPU hipMemcpy(d, dev0_x, N*sizeof(double), hipMemcpyDefault ); //Free memory printf("CUDA=%.15lf\n",d[N/2]); hipFree(dev0_AA); hipFree(dev0_JA); hipFree(dev0_IA); hipFree(dev0_r1); hipFree(dev0_x); hipFree(dev0_r0); hipFree(dev0_u_base); hipFree(dev0_Hm); hipFree(dev0_y); hipFree(dev0_g); hipFree(dev0_e); hipFree(dev0_w); hipFree(dev0_Wm); hipFree(dev0_vita); hipFree(dev0_res); end1 = clock(); cuda_GMRES_time = ((double) (end1 - start1)) / CLOCKS_PER_SEC; printf("\nXronos gia Cuda_GMRES=%.5lfs\n\n",cuda_GMRES_time); } }
428cebcd017e45a48795fb3acb0973e32fe2d622.cu
// // cudaGMRES.cu // Cuda GMRES // // Created by Tim Ioannidis on 2/18/12. // Copyright 2012 Chemeng NTUA. All rights reserved. // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <cuda.h> #include "structs.h" #include "parameters.h" #include "extern.h" #include "cuda_config.h" #include "cuda_methods.h" #include "cuda_dot.cu" #include "cuda_initial2.cu" #include "cuda_matvec.cu" #include "cuda_matvecn.cu" #include "cuda_vec_update.cu" #include "cuda_reduction.cu" #include "cuda_norm.cu" #include "cuda_vec_replace.cu" #include "cuda_vec_replace2.cu" #include "cuda_matvec_up.cu" #include "cuda_leastsq.cu" /////////////////////////////////////////////////////// ////// SOS!!!!! O DISDIASTATOS /////// ////// PINAKAS u_base EINAI KATA STILI /////// ////// E.G. A[i][j]=A[j*ROWS+i] /////// /////////////////////////////////////////////////////// extern "C" { //lunei to provlima A*x=r1 kai gemizei to d=x void cuda_GMRES( double *d, double *r1, struct common4 *sparse) { clock_t start1, end1 ; float cuda_GMRES_time=0; int blocksPerGrid; blocksPerGrid=((N+threadsPerBlock-1)/threadsPerBlock); if (blocksPerGrid > 65530) { printf("WARNING,block number exceeded hardware limit"); blocksPerGrid=65530; } //streams and devices cudaStream_t stream0; cudaSetDevice(0); cudaStreamCreate( &stream0 ); //de metraw ta apo panw sto xrono giati einai to initialization tis kartas printf("ThreadsPerBlock=%d\n",threadsPerBlock); printf("\nCuda GMRES started computation\n"); start1 = clock(); //variables declaration int iter=0,i=0,j=0; double *dev0_AA,*dev0_r1,*dev0_help; int *dev0_JA,*dev0_IA; double *dev0_x,*dev0_r0,*dev0_w,*dev0_res,*dev0_vita,*dev0_Wm; double *dev0_Hm,*dev0_u_base,*dev0_e,*dev0_y,*dev0_g; //allocation sto device arrays me dedomena cudaMalloc((void**)&dev0_r1,(N)*sizeof(double)); cudaMalloc((void**)&dev0_AA,(Nz)*sizeof(double)); cudaMalloc((void**)&dev0_JA,(Nz)*sizeof(int)); cudaMalloc((void**)&dev0_IA,(N+1)*sizeof(int)); //allocation sto device voithikwn arrays cudaMalloc((void**)&dev0_x,(N)*sizeof(double)); cudaMalloc((void**)&dev0_r0,(N)*sizeof(double)); cudaMalloc((void**)&dev0_Hm,((m+1)*m)*sizeof(double)); cudaMalloc((void**)&dev0_u_base,(N*m)*sizeof(double)); cudaMalloc((void**)&dev0_e,(m+1)*sizeof(double)); cudaMalloc((void**)&dev0_y,(m)*sizeof(double)); cudaMalloc((void**)&dev0_g,(m+1)*sizeof(double)); cudaMalloc((void**)&dev0_w,(N)*sizeof(double)); cudaMalloc((void**)&dev0_res,(blocksPerGrid)*sizeof(double)); cudaMalloc((void**)&dev0_vita,sizeof(double)); cudaMalloc((void**)&dev0_Wm,((m+1)*(m+1))*sizeof(double)); cudaMalloc((void**)&dev0_help,sizeof(double)); //perasma dedomenwn stin global device memory cudaMemcpy(dev0_AA, sparse->AA, Nz*sizeof(double), cudaMemcpyDefault ); cudaMemcpy(dev0_JA, sparse->JA, Nz*sizeof(int), cudaMemcpyDefault ); cudaMemcpy(dev0_IA, sparse->IA, (N+1)*sizeof(int), cudaMemcpyDefault ); cudaMemcpy(dev0_r1, r1, N*sizeof(double), cudaMemcpyDefault ); ///////////////////////////////////////////////////////////////////////////////// //ksekinima epanaliptikis iter=1; cuda_initial2_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>((m+1),dev0_e,N, dev0_x); while (iter<=GMRES_iter) { //upologismos r0=b-A*x opou b=r1 me MATMUL se CSR format kai NORM r0 cuda_matvecn_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>(N, dev0_r0, dev0_AA, dev0_JA, dev0_IA, dev0_x, dev0_r1,dev0_res); cuda_reduction_kernel<<<1,threadsPerBlock,0,stream0>>>(blocksPerGrid, dev0_res,dev0_vita,1); //upologismos uj[]=r0[]/vita kai apothikeusi ston u_base KATA STILI 
orismenos //tautoxrona g=vita*e cuda_vec_replace_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>( N, dev0_u_base, dev0_vita, dev0_r0, m+1, dev0_g, dev0_e); //KATASKEUI UPOXWROU Krylov for (j=0; j<m; j++) { //j einai to count g mas if (j >= 1) { //u_base[][j+1]=w[]/Hm[j+1][j] cuda_vec_replace2_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>(N, &dev0_u_base[(j)*N], &dev0_Hm[(j)*m + j-1], dev0_w); } //uj[i]=u_base[i][j] //matmul me CSR w=matvec(A,uj) cuda_matvec_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>( N, dev0_w, dev0_AA, dev0_JA, dev0_IA,&dev0_u_base[j*N]); for (i=0; i<=j; i++) { //uj[k]=u_base[k][i] //DOT PRODUCT w*uj-> kai eisagwgi sto Hm[i][j] cuda_dot_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>( N,dev0_w, &dev0_u_base[i*N],dev0_res); cuda_reduction_kernel<<<1,threadsPerBlock,0,stream0>>>(blocksPerGrid, dev0_res,&dev0_Hm[i*m + j],2); //w=w-Hm(i,j)*uj cuda_vec_update_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>( N, dev0_w, &dev0_Hm[i*m + j], &dev0_u_base[i*N] ); } cuda_norm_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>(N, dev0_w,dev0_res); cuda_reduction_kernel<<<1,threadsPerBlock,0,stream0>>>(blocksPerGrid, dev0_res,&dev0_Hm[(j+1)*m + j],1); /* if (j<(m-1)) { //u_base[][j+1]=w[]/Hm[j+1][j] cuda_vec_replace2_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>(N, &dev0_u_base[(j+1)*N], &dev0_Hm[(j+1)*m + j], dev0_w); }*/ } //Least Squares problem if (threadsPerBlock<m) { printf("ERROR, threadsPerBlock should be greater than m"); } cuda_leastsq_kernel<<<1,threadsPerBlock,0,stream0>>>(m,dev0_Hm,dev0_g,dev0_y,dev0_Wm); //TELOS Least Squares problem //upologismos x = x0 + matvec(u_base(N,m),y(m)) cuda_matvec_up_kernel<<<blocksPerGrid,threadsPerBlock,0,stream0>>>(N, m, dev0_x, dev0_u_base, dev0_y); iter++; } //Copy result back to CPU cudaMemcpy(d, dev0_x, N*sizeof(double), cudaMemcpyDefault ); //Free memory printf("CUDA=%.15lf\n",d[N/2]); cudaFree(dev0_AA); cudaFree(dev0_JA); cudaFree(dev0_IA); cudaFree(dev0_r1); cudaFree(dev0_x); cudaFree(dev0_r0); cudaFree(dev0_u_base); cudaFree(dev0_Hm); cudaFree(dev0_y); cudaFree(dev0_g); cudaFree(dev0_e); cudaFree(dev0_w); cudaFree(dev0_Wm); cudaFree(dev0_vita); cudaFree(dev0_res); end1 = clock(); cuda_GMRES_time = ((double) (end1 - start1)) / CLOCKS_PER_SEC; printf("\nXronos gia Cuda_GMRES=%.5lfs\n\n",cuda_GMRES_time); } }
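The GMRES driver above relies on CSR matrix-vector kernels (cuda_matvec.cu and friends) whose sources are not reproduced in this pair. For reference, here is a hedged CPU sketch of the CSR product y = A*x using the same AA/JA/IA naming; it assumes 0-based indices and the textbook row loop, which may not match the kernels' internal parallelization.

// CSR sparse matrix-vector product: y = A * x.
// AA holds the Nz nonzero values, JA their column indices, and IA[row]..IA[row+1]-1
// gives the range of nonzeros belonging to each row (IA has N+1 entries).
void csr_matvec_cpu(int N, const double* AA, const int* JA, const int* IA,
                    const double* x, double* y)
{
    for (int row = 0; row < N; ++row) {
        double sum = 0.0;
        for (int p = IA[row]; p < IA[row + 1]; ++p)
            sum += AA[p] * x[JA[p]];
        y[row] = sum;
    }
}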
7a670ca73af28efeef88b1e890826058f858e4c6.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/ifft.hpp> #include <nbla/cuda/function/utils/fft.cuh> #include <nbla/variable.hpp> namespace nbla { template <typename T> IFFTCuda<T>::~IFFTCuda() { hipfftDestroy(plan_forward_); hipfftDestroy(plan_backward_); } template <typename T> void IFFTCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); IFFT<T>::setup_impl(inputs, outputs); hipfftCreate(&plan_forward_); hipfftCreate(&plan_backward_); // Compute scale and store the original shape (i.e, n) Shape_t oshape(outputs[0]->shape()); Size_t base_axis_output = oshape.size() - 1 - this->signal_ndim_; for (int i = 0; i < this->signal_ndim_; i++) { signal_size_ *= oshape[base_axis_output + i]; n_.push_back(oshape[base_axis_output + i]); } } template <typename T> void IFFTCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); // IFFT const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); exec_cufft<Tcu>(this->ctx_, x, y, inputs[0]->shape(), outputs[0]->shape(), plan_forward_, true, true, HIPFFT_BACKWARD, this->n_, this->signal_ndim_); // Normalize const Size_t size = outputs[0]->size(); if (this->normalized_) { const float scale = 1.f / std::sqrt(this->signal_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, y); } else { const float scale = 1.f / this->signal_size_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, y); } } template <typename T> void IFFTCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0])) { return; } cuda_set_device(this->device_); const Size_t size = inputs[0]->size(); if (accum[0]) { // Create tmp array NdArrayPtr ndarray = make_shared<NdArray>(inputs[0]->shape()); // FFT const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); Tcu *tmp_buff = ndarray->cast(get_dtype<Tcu>(), this->ctx_)->pointer<Tcu>(); exec_cufft<Tcu>(this->ctx_, dy, tmp_buff, outputs[0]->shape(), inputs[0]->shape(), plan_backward_, true, true, HIPFFT_FORWARD, this->n_, this->signal_ndim_); // Normalize const Size_t size = inputs[0]->size(); if (this->normalized_) { const float scale = 1.f / std::sqrt(this->signal_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, tmp_buff); } else { const float scale = 1.f / this->signal_size_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, tmp_buff); } // Accumulation Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_add_cufft_result, size, tmp_buff, dx); } else { 
// FFT const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]); exec_cufft<Tcu>(this->ctx_, dy, dx, outputs[0]->shape(), inputs[0]->shape(), plan_backward_, true, true, HIPFFT_FORWARD, this->n_, this->signal_ndim_); // Normalize const Size_t size = inputs[0]->size(); if (this->normalized_) { const float scale = 1.f / std::sqrt(this->signal_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, dx); } else { const float scale = 1.f / this->signal_size_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, dx); } } } }
7a670ca73af28efeef88b1e890826058f858e4c6.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/ifft.hpp> #include <nbla/cuda/function/utils/fft.cuh> #include <nbla/variable.hpp> namespace nbla { template <typename T> IFFTCuda<T>::~IFFTCuda() { cufftDestroy(plan_forward_); cufftDestroy(plan_backward_); } template <typename T> void IFFTCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); IFFT<T>::setup_impl(inputs, outputs); cufftCreate(&plan_forward_); cufftCreate(&plan_backward_); // Compute scale and store the original shape (i.e, n) Shape_t oshape(outputs[0]->shape()); Size_t base_axis_output = oshape.size() - 1 - this->signal_ndim_; for (int i = 0; i < this->signal_ndim_; i++) { signal_size_ *= oshape[base_axis_output + i]; n_.push_back(oshape[base_axis_output + i]); } } template <typename T> void IFFTCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); // IFFT const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_); Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); exec_cufft<Tcu>(this->ctx_, x, y, inputs[0]->shape(), outputs[0]->shape(), plan_forward_, true, true, CUFFT_INVERSE, this->n_, this->signal_ndim_); // Normalize const Size_t size = outputs[0]->size(); if (this->normalized_) { const float scale = 1.f / std::sqrt(this->signal_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, y); } else { const float scale = 1.f / this->signal_size_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, y); } } template <typename T> void IFFTCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0])) { return; } cuda_set_device(this->device_); const Size_t size = inputs[0]->size(); if (accum[0]) { // Create tmp array NdArrayPtr ndarray = make_shared<NdArray>(inputs[0]->shape()); // FFT const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); Tcu *tmp_buff = ndarray->cast(get_dtype<Tcu>(), this->ctx_)->pointer<Tcu>(); exec_cufft<Tcu>(this->ctx_, dy, tmp_buff, outputs[0]->shape(), inputs[0]->shape(), plan_backward_, true, true, CUFFT_FORWARD, this->n_, this->signal_ndim_); // Normalize const Size_t size = inputs[0]->size(); if (this->normalized_) { const float scale = 1.f / std::sqrt(this->signal_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, tmp_buff); } else { const float scale = 1.f / this->signal_size_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, tmp_buff); } // Accumulation Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_add_cufft_result, size, tmp_buff, dx); } else { // FFT const Tcu *dy = 
outputs[0]->get_grad_pointer<Tcu>(this->ctx_); Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]); exec_cufft<Tcu>(this->ctx_, dy, dx, outputs[0]->shape(), inputs[0]->shape(), plan_backward_, true, true, CUFFT_FORWARD, this->n_, this->signal_ndim_); // Normalize const Size_t size = inputs[0]->size(); if (this->normalized_) { const float scale = 1.f / std::sqrt(this->signal_size_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, dx); } else { const float scale = 1.f / this->signal_size_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale, dx); } } } }
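A note on the scaling in both versions above: cuFFT/hipFFT inverse transforms are unnormalized, which is why the kernels divide by the signal size afterwards. With N = signal_size_ (the product of the transformed dimensions), the computed one-dimensional inverse is

\[
y_k = s \sum_{n=0}^{N-1} x_n\, e^{+2\pi i\, n k / N},
\qquad
s =
\begin{cases}
1/\sqrt{N} & \text{if normalized\_ is set,}\\
1/N & \text{otherwise,}
\end{cases}
\]

and the backward pass applies the matching forward transform followed by the same scale.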
18482529024c36e4e88ac5b755f34e49cbbe08ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dpCudaMatrixTranspose.hpp" #include "errorCheck.hpp" #define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); } #define BEGIN hipEventRecord(begin, 0); #define END hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&delTime, begin, end); //source: //http://code.msdn.microsoft.com/windowsdesktop/Matrix-Transpose-on-GPU-94f0a054 __global__ void matrixTranspose(const float* m, float* t, int matrixSize){ int x = blockIdx.x * blockDim.x + threadIdx.x; // col int y = blockIdx.y * blockDim.y + threadIdx.y; // row if (x >= matrixSize || y >= matrixSize) return; int from = x + y * matrixSize; int to = y + x * matrixSize; t[to] = m[from]; // t(j,i) = m(i,j) } dpCudaMatrixTranspose::dpCudaMatrixTranspose(cl_context ctx, cl_command_queue q){ workDimension = TWO_D; //name is same as cl alternative allowing the analysis script to later figure //out this measurement was from a cuda kernel by inspecting the platform id from dpClient name = "MatrixTranspose"; hipEventCreate(&begin); hipEventCreate(&end); hipGetDevice(&device); hipGetDeviceProperties(&props, device); } void dpCudaMatrixTranspose::setup(int dataMB, int xLocal, int yLocal, int zLocal){ localSize[0] = localSize[1] = localSize[2] = 1; M=(int)sqrt(dataMB*1048576/sizeof(float)); N=M; //not tested with rectangle matrix MB = M*N*sizeof(float)/1048576; } void dpCudaMatrixTranspose::init(){ dataParameters.push_back(M); dataParameters.push_back(N); dataNames.push_back("width"); dataNames.push_back("height"); mem_size = sizeof(float) * M * N; // allocate and initalize host memory Ain = (float*)malloc(mem_size); Aout = (float*)malloc(mem_size); if(!Ain || !Aout) fprintf(stderr, "error in malloc\n"); generateMatrix(Ain, M, N); } void dpCudaMatrixTranspose::memoryCopyOut(){ BEGIN cudaErrChk(hipMalloc((void **) &Ain_d, mem_size)); cudaErrChk(hipMalloc((void **) &Aout_d, mem_size)); cudaErrChk(hipMemcpy(Ain_d, Ain, mem_size, hipMemcpyHostToDevice)); END //printf("%0.3f,",delTime); } void dpCudaMatrixTranspose::plan(){ BEGIN //use the largest block possible: blockSize.x = props.maxThreadsDim[0]; blockSize.y = props.maxThreadsDim[1]; if (blockSize.x*blockSize.y > props.maxThreadsPerBlock){ blockSize.x = (int) sqrt(props.maxThreadsPerBlock); blockSize.y = blockSize.x; } //specify number of blocks in width and height of grid: nBlocks.x = N/blockSize.x - N%blockSize.x + blockSize.x; nBlocks.y = M/blockSize.y - N%blockSize.y + blockSize.y; END } int dpCudaMatrixTranspose::execute(){ dim3 grid(nBlocks.x, nBlocks.y); dim3 block(blockSize.x, blockSize.y); hipError_t err; BEGIN hipLaunchKernelGGL(( matrixTranspose) , dim3(grid), dim3(block) , 0, 0, Ain_d, Aout_d, M); err = hipPeekAtLastError() ; cudaErrChk(err); cudaErrChk(hipDeviceSynchronize()); END //printf("%0.3f,",delTime); if(err!=hipSuccess) return -1; return 0; } void dpCudaMatrixTranspose::memoryCopyIn(){ BEGIN cudaErrChk(hipMemcpy(Aout, Aout_d, mem_size, hipMemcpyDeviceToHost)); END //printf("%0.3f,\n",delTime); } void dpCudaMatrixTranspose::cleanUp(){ free(Ain); free(Aout); hipFree(Ain_d); hipFree(Aout_d); } void dpCudaMatrixTranspose::generateMatrix(float *A, int height, int width){ int i, j; srand(time(NULL)); for (j=0; j<height; j++){//rows in A for (i=0; i<width; i++){//cols in A A[i + width*j] = rand() / (RAND_MAX/99999.9 + 1); } } } /* #include <stddef.h> #include <sys/types.h> #include <unistd.h> #include <stdlib.h> #include <sys/un.h> #include <math.h> #include <errno.h> 
#include <string.h> */
18482529024c36e4e88ac5b755f34e49cbbe08ee.cu
#include "dpCudaMatrixTranspose.hpp" #include "errorCheck.hpp" #define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); } #define BEGIN cudaEventRecord(begin, 0); #define END cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&delTime, begin, end); //source: //http://code.msdn.microsoft.com/windowsdesktop/Matrix-Transpose-on-GPU-94f0a054 __global__ void matrixTranspose(const float* m, float* t, int matrixSize){ int x = blockIdx.x * blockDim.x + threadIdx.x; // col int y = blockIdx.y * blockDim.y + threadIdx.y; // row if (x >= matrixSize || y >= matrixSize) return; int from = x + y * matrixSize; int to = y + x * matrixSize; t[to] = m[from]; // t(j,i) = m(i,j) } dpCudaMatrixTranspose::dpCudaMatrixTranspose(cl_context ctx, cl_command_queue q){ workDimension = TWO_D; //name is same as cl alternative allowing the analysis script to later figure //out this measurement was from a cuda kernel by inspecting the platform id from dpClient name = "MatrixTranspose"; cudaEventCreate(&begin); cudaEventCreate(&end); cudaGetDevice(&device); cudaGetDeviceProperties(&props, device); } void dpCudaMatrixTranspose::setup(int dataMB, int xLocal, int yLocal, int zLocal){ localSize[0] = localSize[1] = localSize[2] = 1; M=(int)sqrt(dataMB*1048576/sizeof(float)); N=M; //not tested with rectangle matrix MB = M*N*sizeof(float)/1048576; } void dpCudaMatrixTranspose::init(){ dataParameters.push_back(M); dataParameters.push_back(N); dataNames.push_back("width"); dataNames.push_back("height"); mem_size = sizeof(float) * M * N; // allocate and initalize host memory Ain = (float*)malloc(mem_size); Aout = (float*)malloc(mem_size); if(!Ain || !Aout) fprintf(stderr, "error in malloc\n"); generateMatrix(Ain, M, N); } void dpCudaMatrixTranspose::memoryCopyOut(){ BEGIN cudaErrChk(cudaMalloc((void **) &Ain_d, mem_size)); cudaErrChk(cudaMalloc((void **) &Aout_d, mem_size)); cudaErrChk(cudaMemcpy(Ain_d, Ain, mem_size, cudaMemcpyHostToDevice)); END //printf("%0.3f,",delTime); } void dpCudaMatrixTranspose::plan(){ BEGIN //use the largest block possible: blockSize.x = props.maxThreadsDim[0]; blockSize.y = props.maxThreadsDim[1]; if (blockSize.x*blockSize.y > props.maxThreadsPerBlock){ blockSize.x = (int) sqrt(props.maxThreadsPerBlock); blockSize.y = blockSize.x; } //specify number of blocks in width and height of grid: nBlocks.x = N/blockSize.x - N%blockSize.x + blockSize.x; nBlocks.y = M/blockSize.y - N%blockSize.y + blockSize.y; END } int dpCudaMatrixTranspose::execute(){ dim3 grid(nBlocks.x, nBlocks.y); dim3 block(blockSize.x, blockSize.y); cudaError_t err; BEGIN matrixTranspose <<< grid, block >>> (Ain_d, Aout_d, M); err = cudaPeekAtLastError() ; cudaErrChk(err); cudaErrChk(cudaDeviceSynchronize()); END //printf("%0.3f,",delTime); if(err!=cudaSuccess) return -1; return 0; } void dpCudaMatrixTranspose::memoryCopyIn(){ BEGIN cudaErrChk(cudaMemcpy(Aout, Aout_d, mem_size, cudaMemcpyDeviceToHost)); END //printf("%0.3f,\n",delTime); } void dpCudaMatrixTranspose::cleanUp(){ free(Ain); free(Aout); cudaFree(Ain_d); cudaFree(Aout_d); } void dpCudaMatrixTranspose::generateMatrix(float *A, int height, int width){ int i, j; srand(time(NULL)); for (j=0; j<height; j++){//rows in A for (i=0; i<width; i++){//cols in A A[i + width*j] = rand() / (RAND_MAX/99999.9 + 1); } } } /* #include <stddef.h> #include <sys/types.h> #include <unistd.h> #include <stdlib.h> #include <sys/un.h> #include <math.h> #include <errno.h> #include <string.h> */
2172e4e9d17fba543e8ea49a235b42eee843ff5a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include <owl/common/math/random.h> #include "cudaHelpers.cuh" #include <thrust/device_vector.h> #include <thrust/count.h> #include <thrust/execution_policy.h> //Brownian motion #include <ctime> #include <hiprand/hiprand_kernel.h> //Mesh #include "cuda/DeviceTetMesh.cuh" #include "query/RTQuery.h" namespace advect { double diffusionCoef = 0.1; // kbT/(6*pi*mu*rp) um^2/s // // Thrust helper // template <typename T> struct minmax_pair { T min_val; T max_val; }; // minmax_unary_op is a functor that takes in a value x and // returns a minmax_pair whose minimum and maximum values // are initialized to x. template <typename T> struct minmax_unary_op : public thrust::unary_function< T, minmax_pair<T> > { __host__ __device__ minmax_pair<T> operator()(const T& x) const { minmax_pair<T> result; result.min_val = x; result.max_val = x; return result; } }; // minmax_binary_op is a functor that accepts two minmax_pair // structs and returns a new minmax_pair whose minimum and // maximum values are the min() and max() respectively of // the minimums and maximums of the input pairs template <typename T> struct minmax_binary_op : public thrust::binary_function< minmax_pair<T>, minmax_pair<T>, minmax_pair<T> > { __host__ __device__ minmax_pair<T> operator()(const minmax_pair<T>& x, const minmax_pair<T>& y) const { minmax_pair<T> result; result.min_val = thrust::min(x.min_val, y.min_val); result.max_val = thrust::max(x.max_val, y.max_val); return result; } }; template<typename T> struct negative { __host__ __device__ bool operator()(const T& x) const { return x < 0; } }; //----------------End Thrust Helper------------------- //[Device] Init Random particles (pos and vel) in a box __global__ void initParticlesKernel(Particle* particles, int N, const box3d worldBounds) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= N) return; LCG<16> random; random.init(threadIdx.x, blockIdx.x); // *any* random position for now: vec3d randomPos = worldBounds.lower + vec3d(random(), random(), random()) * worldBounds.size(); particles[particleID].x = randomPos.x; particles[particleID].y = randomPos.y; particles[particleID].z = randomPos.z; particles[particleID].w = true; } //[Host] Init Random particles (pos and vel) in a box void cudaInitParticles(Particle* d_particles, int N, const box3d& worldBounds) { int blockDims = 128; int gridDims = divRoundUp(N, blockDims); initParticlesKernel << <gridDims, blockDims >> > (d_particles, N, worldBounds); cudaCheck(hipDeviceSynchronize()); } //[Host] Find the number of particles in file int advect::loadNumParticles(std::string fileName) { int numParticles = 0; std::string word; std::ifstream vfile(fileName); if (vfile.is_open()) { vfile >> word >> numParticles;//Header Line printf("%s %d\n", word.c_str(), numParticles); } vfile.close(); return numParticles; } //[Host] Init particles (pos and vel) from file void advect::cudaInitParticles(Particle* d_particles, int N, std::string fileName) { //Read particles std::string word; int numParticles = 0; std::vector<Particle> particle_loc; particle_loc.reserve(N); std::ifstream vfile(fileName); if (vfile.is_open()) { vfile >> word >> numParticles;//Header Line printf("%s %d\n", word.c_str(), numParticles); vfile >> word >> word >> word >>word;//Comment line, x,y,z,tetID for (int i = 0; i < N; ++i) { Particle pos; vfile >> pos.x >> pos.y >> pos.z >> word; pos.w = true; particle_loc.push_back(pos); if (i < 5) 
std::cout << "Seeding Pos" << i << " " << vec4d(particle_loc[i].x, particle_loc[i].y, particle_loc[i].z, particle_loc[i].w) << std::endl; } vfile.close(); } //Upload to GPU cudaCheck(hipMemcpy(d_particles, particle_loc.data(), N * sizeof(particle_loc[0]), hipMemcpyHostToDevice)); cudaCheck(hipDeviceSynchronize()); } //[Device] Init Random particles (pos and vel) in a box __global__ void evalTimestep(int NumTets, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_TetVelocities, double diffusionCoef, double* d_dt) { int tetID = threadIdx.x + blockDim.x * blockIdx.x; if (tetID >= NumTets) return; vec4i index = d_tetIndices[tetID]; const vec3d A = d_vertexPositions[index.x]; const vec3d B = d_vertexPositions[index.y]; const vec3d C = d_vertexPositions[index.z]; const vec3d D = d_vertexPositions[index.w]; const double volume = dot(D - A, cross(B - A, C - A)); const double grid_h = cbrt(volume); //Velocity timestep constrains : not exceed half of grid size const vec3d vel = d_TetVelocities[tetID]; double max_vel_disp = length(vel); const double dt_vel = 0.5*grid_h/ max_vel_disp; //Brownian motion timestep constrians : const double dt_vel_brownian = (sqrt(6 * diffusionCoef + 2 * max_vel_disp * grid_h) - sqrt(6 * diffusionCoef)) / (2 * max_vel_disp); const double dt_estimate = abs(min(dt_vel_brownian, dt_vel)); d_dt[tetID] = dt_estimate<1e-8?1.12345678:dt_estimate; //if (d_dt[tetID] < 1e-2) // printf("TetID=%d Vel=%f,%f,%f Velocity Disp=%f\n grid_size=%f, dt_vel=%f, dt_vel+Brownian=%f\n", tetID, // vel.x, vel.y, vel.z, max_vel_disp, // grid_h, dt_vel, dt_vel_brownian); } //[Host] Estimate stable step size based on local element size double advect::cudaEvalTimestep(int NumTets, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_Velocities, std::string mode) { thrust::device_vector<double> d_dt_thrust(NumTets, 1000.0); double* d_dt = thrust::raw_pointer_cast(d_dt_thrust.data()); int blockDims = 128; int gridDims = divRoundUp(NumTets, blockDims); evalTimestep << <gridDims, blockDims >> > (NumTets, d_tetIndices, d_vertexPositions, d_Velocities, diffusionCoef, d_dt); cudaCheck(hipDeviceSynchronize()); // setup arguments minmax_unary_op<double> unary_op; minmax_binary_op<double> binary_op; // initialize reduction with the first value minmax_pair<double> init = unary_op(d_dt_thrust[0]); // compute minimum and maximum values minmax_pair<double> result = thrust::transform_reduce(d_dt_thrust.begin(), d_dt_thrust.end(), unary_op, init, binary_op); std::cout << "#adv: minimum dt= " << result.min_val << std::endl; std::cout << "#adv: maximum dt= " << result.max_val << std::endl; return result.min_val; } //----------------------Advection-------------------- //[Device] Pk tet velocity interpolation __global__ void particleAdvectKernel(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_vertexVelocities) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } vec4i index = d_tetIndices[tetID]; const vec3d P = vec3d(p.x, p.y, p.z); const vec3d A = d_vertexPositions[index.x]; const vec3d B = d_vertexPositions[index.y]; const vec3d C = d_vertexPositions[index.z]; const vec3d D = d_vertexPositions[index.w]; const double den = det(A, B, C, D); if (den == 0.f) {//We are in a bad tet, set 
tetID=-2 p.w = false; return; } const double wA = det(P, B, C, D) * (1. / den); const double wB = det(A, P, C, D) * (1. / den); const double wC = det(A, B, P, D) * (1. / den); const double wD = det(A, B, C, P) * (1. / den); const vec3d velA = d_vertexVelocities[index.x]; const vec3d velB = d_vertexVelocities[index.y]; const vec3d velC = d_vertexVelocities[index.z]; const vec3d velD = d_vertexVelocities[index.w]; const vec3d vel = wA * velA + wB * velB + wC * velC + wD * velD; //First-order Euler Integration const vec3d P_next = P + dt * vel; const vec3d P_disp = P_next - P; d_vels[particleID] = vec4d(vel.x, vel.y, vel.z, -1.0); d_disp[particleID] = vec4d(P_disp.x, P_disp.y, P_disp.z, -1.0); //p.x = P_next.x; p.y = P_next.y; p.z = P_next.z; /* if (particleID == 98550) printf("%d [Advect] TetID=%d P(%f,%f,%f) Disp(%f,%f,%f) Vel(%f,%f,%f)\n", particleID, tetID, P.x, P.y, P.z, P_disp.x, P_disp.y, P_disp.z, vel.x, vel.y, vel.z); */ } //[Device] RT0 tet velocity interpolation __global__ void particleAdvectKernelTetVel(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_TetVelocities) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } vec4i index = d_tetIndices[tetID]; const vec3d P = vec3d(p.x, p.y, p.z); const vec3d A = d_vertexPositions[index.x]; const vec3d B = d_vertexPositions[index.y]; const vec3d C = d_vertexPositions[index.z]; const vec3d D = d_vertexPositions[index.w]; const double den = det(A, B, C, D); if (den == 0.f) {//We are in a bad tet, set tetID=-2 p.w = false; return; } const vec3d vel = (vec3d&)d_TetVelocities[tetID]; //First-order Euler Integration const vec3d P_next = P + dt * vel; const vec3d P_disp = P_next - P; d_vels[particleID] = vec4d(vel.x, vel.y, vel.z, -1.0); d_disp[particleID] = vec4d(P_disp.x, P_disp.y, P_disp.z, -1.0); //p.x = P_next.x; p.y = P_next.y; p.z = P_next.z; /* if (particleID <100) printf("%d [Advect] TetID=%d P(%f,%f,%f) Disp(%f,%f,%f) Vel(%f,%f,%f)\n", particleID, tetID, P.x, P.y, P.z, P_disp.x, P_disp.y, P_disp.z, vel.x, vel.y, vel.z); */ } //[Device] Constant initial velocity interpolation __global__ void particleAdvectConstVel(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } //Constant velocity, First-order Euler Integration vec4d& vel = d_vels[particleID]; d_disp[particleID] = vec4d(vel.x*dt, vel.y * dt, vel.z * dt, -1.0); } // [Velocity] Tet velocity interpolation // output: d_vels void cudaAdvect(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_Velocities, std::string mode) { int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); if (mode == "TetVelocity") { particleAdvectKernelTetVel << <gridDims, blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles, d_tetIndices, d_vertexPositions, d_Velocities); } if (mode == "VertexVelocity") particleAdvectKernel << <gridDims, 
blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles, d_tetIndices, d_vertexPositions, d_Velocities); if (mode == "ConstantVelocity") particleAdvectConstVel << <gridDims, blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles); cudaCheck(hipDeviceSynchronize()); } //[Device] Constant initial velocity interpolation __device__ double SquareDuct_analyticalVel(double x,double y, double h,double L, double dp,double mu) { //10.1103/PhysRevE.71.057301 double vz = 0.0; for (int i = 0; i < 20; i++) { double n = 2.0 * i + 1.0; vz += 1 / (n * n * n) * ( 1.0 - cosh(n* M_PI*x/h) / cosh(n*M_PI/2.0) )*sin(n * M_PI * y/ h); } vz = -dp / L / mu * 4.0 * h * h / M_PI / M_PI / M_PI * vz; return vz; } __global__ void particleTubeAdvect(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, double h, double L, double dp, double mu) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } //Constant velocity, First-order Euler Integration vec4d& vel = d_vels[particleID]; vel.x = 0.0; vel.y = 0.0; vel.z = SquareDuct_analyticalVel(p.x, p.y, h, L, dp, mu); //printf("%lf %lf %lf->vel=%lf,%lf,%lf\n", p.x, p.y, p.z, vel.x, vel.y, vel.z); d_disp[particleID] = vec4d(vel.x * dt, vel.y * dt, vel.z * dt, -1.0); //if(particleID<5) // printf("(%lf,%lf,%lf) Vel=(%lf,%lf,%lf)\n", p.x, p.y, p.z, vel.x, vel.y, vel.z); } void advect::cudaTubeAdvect(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles) { double L = 30;//cm double h = 0.1;//cm double mu = 0.001072;//Pa s double dp = -4.904871302657455;//Pa double Q = 0.000536;//cm^3/s int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); particleTubeAdvect << <gridDims, blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles, h, L, dp, mu); cudaCheck(hipDeviceSynchronize()); //system("pause"); } //----------------------Brownian motion-------------------- #include <thrust/execution_policy.h> struct InitCURAND { unsigned long long seed; hiprandState_t* states; InitCURAND(unsigned long long _seed, hiprandState_t* _states) { seed = _seed; states = _states; } __device__ void operator()(unsigned int i) { hiprand_init(seed, i, 0, &states[i]); } }; void advect::initRandomGenerator(int numParticles, hiprandState_t* rand_states){ //Each particle has its own random generator for each thread long int rng_seed = time(NULL); rng_seed = 1591593751; printf("#adv: Random Seed=%d\n", rng_seed); thrust::counting_iterator<unsigned int> count(0); thrust::for_each(count, count + numParticles, InitCURAND(rng_seed, rand_states)); } //[Device] Constant initial velocity interpolation __global__ void particleBrownianMotion(Particle* d_particles, vec4d* d_disp, double D, hiprandState_t* states, double dt, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const double randDisp = sqrt(2.00 * D * dt); const double randXi0 = hiprand_normal_double(&states[particleID]); const double randXi1 = hiprand_normal_double(&states[particleID]); const double randXi2 = hiprand_normal_double(&states[particleID]); d_disp[particleID] += vec4d(randXi0, randXi1, randXi2, 0.0)*randDisp; //if (particleID == 170) //printf("%d D=%f dt=%f Random 
disp (%f,%f,%f)-(%f,%f,%f) Disp %lf\n", particleID, D,dt, // d_disp[particleID].x, d_disp[particleID].y, d_disp[particleID].z, // randXi0, randXi1, randXi2,randDisp); } void advect::cudaBrownianMotion(Particle* d_particles, vec4d* d_disp, hiprandState_t* states, double dt, int numParticles) {//Brownian motion //double diffusionCoef = 0.1;// kbT/(6*pi*mu*rp) um^2/s int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); diffusionCoef = 5.7e-6; particleBrownianMotion << <gridDims, blockDims >> > (d_particles, d_disp, diffusionCoef, states, dt, numParticles); cudaCheck(hipDeviceSynchronize()); //system("pause"); } __global__ void checkParticles(int* d_triIDs, bool * isHit, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; //if (particleID == 52747) //printf("Check Pts %d %d OldStatus=%d\n",particleID, d_triIDs[particleID], isHit[particleID]); if (d_triIDs[particleID] == -1) return; isHit[particleID] = true; } //----------------------Move-------------------- __global__ void particleMoveKernel(Particle* d_particles, vec4d* d_vels, double dt, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return;//particle is out of domain const vec4d P = vec4d(p.x, p.y, p.z, p.w); const vec4d vel = d_vels[particleID]; const vec4d P_next = P + vel * dt; //if (particleID == 5) // printf("[Move] %d (%f,%f,%f)->(%f,%f,%f) @ Vel(%f,%f,%f)\n", particleID, // p.x, p.y, p.z, P_next.x, P_next.y, P_next.z, // vel.x, vel.y, vel.z); p.x = P_next.x; p.y = P_next.y; p.z = P_next.z; } //[Host] Move particles x1 = x0 + vel*dt and do specular reflection if hit the wall // d_tetIDs used to determine the status of a particle void advect::cudaMoveParticles(Particle* d_particles, vec4d* d_vels, double dt, int numParticles, int* d_tetIDs){ int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); //Move particles particleMoveKernel << <gridDims, blockDims >> > (d_particles, d_vels, dt, numParticles); cudaCheck(hipDeviceSynchronize()); } __global__ void particleMoveKernel(Particle* d_particles, vec4d* d_disps, int* d_tetIDs, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; vec4d& disp = d_disps[particleID]; /* if (!p.w) { //if (particleID == 33216) //if (particleID == 52747 || particleID == 98550 || particleID == 83144) //printf("[Move] %d (%.15lf,%.15lf,%.15lf)->Disp(%.15f,%.15f,%.15f)\n", particleID, // p.x, p.y, p.z, // disp.x, disp.y, disp.z); } */ if (!p.w) return;//particle is out of domain /* const vec4d P = vec4d(p.x, p.y, p.z, p.w); const vec4d P_next = P + disp; if (particleID == 50606) { //if (particleID == 52747 || particleID == 98550 || particleID == 83144) printf("[Move] %d tetID=%d (%.15lf,%.15lf,%.15lf)->(%.15f,%.15f,%.15f) Disp(%.15f,%.15f,%.15f)\n", particleID, d_tetIDs[particleID], p.x, p.y, p.z, P_next.x, P_next.y, P_next.z, disp.x, disp.y, disp.z); } */ //Update location p.x += disp.x; p.y += disp.y; p.z += disp.z; //Reset disp for the next iteration disp.x = 0.0; disp.y = 0.0; disp.z = 0.0; //disp.w = 0.0; } void advect::cudaMoveParticles(Particle* d_particles, vec4d* d_disps, int numParticles, int* d_tetIDs) { int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); //Move particles particleMoveKernel << <gridDims, blockDims >> > (d_particles, d_disps, d_tetIDs,numParticles); 
cudaCheck(hipDeviceSynchronize()); } //-------------------------Debug----------------------- __global__ void reportParticles(int* tetIDs, int numParticles, int TagID) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; if (tetIDs[particleID] <= TagID) printf("--Particle [%d] TagID=%d\n", particleID, tetIDs[particleID]); } //[Host] Check particle status based on tetID void advect::cudaReportParticles(int numParticles, int* d_tetIDs) { thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(d_tetIDs); int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); int NumBadParticles = thrust::count_if(thrust::device, dev_ptr, dev_ptr + numParticles, negative<int>()); printf("#adv: Out-of-domain particles(-tetID) = %d\n", NumBadParticles); if (NumBadParticles > 0) { reportParticles << <gridDims, blockDims >> > (d_tetIDs, numParticles, -1); cudaCheck(hipDeviceSynchronize()); } } }
2172e4e9d17fba543e8ea49a235b42eee843ff5a.cu
#include "common.h" #include <owl/common/math/random.h> #include "cudaHelpers.cuh" #include <thrust/device_vector.h> #include <thrust/count.h> #include <thrust/execution_policy.h> //Brownian motion #include <ctime> #include <curand_kernel.h> //Mesh #include "cuda/DeviceTetMesh.cuh" #include "query/RTQuery.h" namespace advect { double diffusionCoef = 0.1; // kbT/(6*pi*mu*rp) um^2/s // // Thrust helper // template <typename T> struct minmax_pair { T min_val; T max_val; }; // minmax_unary_op is a functor that takes in a value x and // returns a minmax_pair whose minimum and maximum values // are initialized to x. template <typename T> struct minmax_unary_op : public thrust::unary_function< T, minmax_pair<T> > { __host__ __device__ minmax_pair<T> operator()(const T& x) const { minmax_pair<T> result; result.min_val = x; result.max_val = x; return result; } }; // minmax_binary_op is a functor that accepts two minmax_pair // structs and returns a new minmax_pair whose minimum and // maximum values are the min() and max() respectively of // the minimums and maximums of the input pairs template <typename T> struct minmax_binary_op : public thrust::binary_function< minmax_pair<T>, minmax_pair<T>, minmax_pair<T> > { __host__ __device__ minmax_pair<T> operator()(const minmax_pair<T>& x, const minmax_pair<T>& y) const { minmax_pair<T> result; result.min_val = thrust::min(x.min_val, y.min_val); result.max_val = thrust::max(x.max_val, y.max_val); return result; } }; template<typename T> struct negative { __host__ __device__ bool operator()(const T& x) const { return x < 0; } }; //----------------End Thrust Helper------------------- //[Device] Init Random particles (pos and vel) in a box __global__ void initParticlesKernel(Particle* particles, int N, const box3d worldBounds) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= N) return; LCG<16> random; random.init(threadIdx.x, blockIdx.x); // *any* random position for now: vec3d randomPos = worldBounds.lower + vec3d(random(), random(), random()) * worldBounds.size(); particles[particleID].x = randomPos.x; particles[particleID].y = randomPos.y; particles[particleID].z = randomPos.z; particles[particleID].w = true; } //[Host] Init Random particles (pos and vel) in a box void cudaInitParticles(Particle* d_particles, int N, const box3d& worldBounds) { int blockDims = 128; int gridDims = divRoundUp(N, blockDims); initParticlesKernel << <gridDims, blockDims >> > (d_particles, N, worldBounds); cudaCheck(cudaDeviceSynchronize()); } //[Host] Find the number of particles in file int advect::loadNumParticles(std::string fileName) { int numParticles = 0; std::string word; std::ifstream vfile(fileName); if (vfile.is_open()) { vfile >> word >> numParticles;//Header Line printf("%s %d\n", word.c_str(), numParticles); } vfile.close(); return numParticles; } //[Host] Init particles (pos and vel) from file void advect::cudaInitParticles(Particle* d_particles, int N, std::string fileName) { //Read particles std::string word; int numParticles = 0; std::vector<Particle> particle_loc; particle_loc.reserve(N); std::ifstream vfile(fileName); if (vfile.is_open()) { vfile >> word >> numParticles;//Header Line printf("%s %d\n", word.c_str(), numParticles); vfile >> word >> word >> word >>word;//Comment line, x,y,z,tetID for (int i = 0; i < N; ++i) { Particle pos; vfile >> pos.x >> pos.y >> pos.z >> word; pos.w = true; particle_loc.push_back(pos); if (i < 5) std::cout << "Seeding Pos" << i << " " << vec4d(particle_loc[i].x, particle_loc[i].y, 
particle_loc[i].z, particle_loc[i].w) << std::endl; } vfile.close(); } //Upload to GPU cudaCheck(cudaMemcpy(d_particles, particle_loc.data(), N * sizeof(particle_loc[0]), cudaMemcpyHostToDevice)); cudaCheck(cudaDeviceSynchronize()); } //[Device] Init Random particles (pos and vel) in a box __global__ void evalTimestep(int NumTets, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_TetVelocities, double diffusionCoef, double* d_dt) { int tetID = threadIdx.x + blockDim.x * blockIdx.x; if (tetID >= NumTets) return; vec4i index = d_tetIndices[tetID]; const vec3d A = d_vertexPositions[index.x]; const vec3d B = d_vertexPositions[index.y]; const vec3d C = d_vertexPositions[index.z]; const vec3d D = d_vertexPositions[index.w]; const double volume = dot(D - A, cross(B - A, C - A)); const double grid_h = cbrt(volume); //Velocity timestep constrains : not exceed half of grid size const vec3d vel = d_TetVelocities[tetID]; double max_vel_disp = length(vel); const double dt_vel = 0.5*grid_h/ max_vel_disp; //Brownian motion timestep constrians : const double dt_vel_brownian = (sqrt(6 * diffusionCoef + 2 * max_vel_disp * grid_h) - sqrt(6 * diffusionCoef)) / (2 * max_vel_disp); const double dt_estimate = abs(min(dt_vel_brownian, dt_vel)); d_dt[tetID] = dt_estimate<1e-8?1.12345678:dt_estimate; //if (d_dt[tetID] < 1e-2) // printf("TetID=%d Vel=%f,%f,%f Velocity Disp=%f\n grid_size=%f, dt_vel=%f, dt_vel+Brownian=%f\n", tetID, // vel.x, vel.y, vel.z, max_vel_disp, // grid_h, dt_vel, dt_vel_brownian); } //[Host] Estimate stable step size based on local element size double advect::cudaEvalTimestep(int NumTets, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_Velocities, std::string mode) { thrust::device_vector<double> d_dt_thrust(NumTets, 1000.0); double* d_dt = thrust::raw_pointer_cast(d_dt_thrust.data()); int blockDims = 128; int gridDims = divRoundUp(NumTets, blockDims); evalTimestep << <gridDims, blockDims >> > (NumTets, d_tetIndices, d_vertexPositions, d_Velocities, diffusionCoef, d_dt); cudaCheck(cudaDeviceSynchronize()); // setup arguments minmax_unary_op<double> unary_op; minmax_binary_op<double> binary_op; // initialize reduction with the first value minmax_pair<double> init = unary_op(d_dt_thrust[0]); // compute minimum and maximum values minmax_pair<double> result = thrust::transform_reduce(d_dt_thrust.begin(), d_dt_thrust.end(), unary_op, init, binary_op); std::cout << "#adv: minimum dt= " << result.min_val << std::endl; std::cout << "#adv: maximum dt= " << result.max_val << std::endl; return result.min_val; } //----------------------Advection-------------------- //[Device] Pk tet velocity interpolation __global__ void particleAdvectKernel(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_vertexVelocities) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } vec4i index = d_tetIndices[tetID]; const vec3d P = vec3d(p.x, p.y, p.z); const vec3d A = d_vertexPositions[index.x]; const vec3d B = d_vertexPositions[index.y]; const vec3d C = d_vertexPositions[index.z]; const vec3d D = d_vertexPositions[index.w]; const double den = det(A, B, C, D); if (den == 0.f) {//We are in a bad tet, set tetID=-2 p.w = false; return; } const double wA = det(P, B, C, D) * (1. 
/ den); const double wB = det(A, P, C, D) * (1. / den); const double wC = det(A, B, P, D) * (1. / den); const double wD = det(A, B, C, P) * (1. / den); const vec3d velA = d_vertexVelocities[index.x]; const vec3d velB = d_vertexVelocities[index.y]; const vec3d velC = d_vertexVelocities[index.z]; const vec3d velD = d_vertexVelocities[index.w]; const vec3d vel = wA * velA + wB * velB + wC * velC + wD * velD; //First-order Euler Integration const vec3d P_next = P + dt * vel; const vec3d P_disp = P_next - P; d_vels[particleID] = vec4d(vel.x, vel.y, vel.z, -1.0); d_disp[particleID] = vec4d(P_disp.x, P_disp.y, P_disp.z, -1.0); //p.x = P_next.x; p.y = P_next.y; p.z = P_next.z; /* if (particleID == 98550) printf("%d [Advect] TetID=%d P(%f,%f,%f) Disp(%f,%f,%f) Vel(%f,%f,%f)\n", particleID, tetID, P.x, P.y, P.z, P_disp.x, P_disp.y, P_disp.z, vel.x, vel.y, vel.z); */ } //[Device] RT0 tet velocity interpolation __global__ void particleAdvectKernelTetVel(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_TetVelocities) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } vec4i index = d_tetIndices[tetID]; const vec3d P = vec3d(p.x, p.y, p.z); const vec3d A = d_vertexPositions[index.x]; const vec3d B = d_vertexPositions[index.y]; const vec3d C = d_vertexPositions[index.z]; const vec3d D = d_vertexPositions[index.w]; const double den = det(A, B, C, D); if (den == 0.f) {//We are in a bad tet, set tetID=-2 p.w = false; return; } const vec3d vel = (vec3d&)d_TetVelocities[tetID]; //First-order Euler Integration const vec3d P_next = P + dt * vel; const vec3d P_disp = P_next - P; d_vels[particleID] = vec4d(vel.x, vel.y, vel.z, -1.0); d_disp[particleID] = vec4d(P_disp.x, P_disp.y, P_disp.z, -1.0); //p.x = P_next.x; p.y = P_next.y; p.z = P_next.z; /* if (particleID <100) printf("%d [Advect] TetID=%d P(%f,%f,%f) Disp(%f,%f,%f) Vel(%f,%f,%f)\n", particleID, tetID, P.x, P.y, P.z, P_disp.x, P_disp.y, P_disp.z, vel.x, vel.y, vel.z); */ } //[Device] Constant initial velocity interpolation __global__ void particleAdvectConstVel(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } //Constant velocity, First-order Euler Integration vec4d& vel = d_vels[particleID]; d_disp[particleID] = vec4d(vel.x*dt, vel.y * dt, vel.z * dt, -1.0); } // [Velocity] Tet velocity interpolation // output: d_vels void cudaAdvect(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, vec4i* d_tetIndices, vec3d* d_vertexPositions, vec3d* d_Velocities, std::string mode) { int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); if (mode == "TetVelocity") { particleAdvectKernelTetVel << <gridDims, blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles, d_tetIndices, d_vertexPositions, d_Velocities); } if (mode == "VertexVelocity") particleAdvectKernel << <gridDims, blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles, 
d_tetIndices, d_vertexPositions, d_Velocities); if (mode == "ConstantVelocity") particleAdvectConstVel << <gridDims, blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles); cudaCheck(cudaDeviceSynchronize()); } //[Device] Constant initial velocity interpolation __device__ double SquareDuct_analyticalVel(double x,double y, double h,double L, double dp,double mu) { //10.1103/PhysRevE.71.057301 double vz = 0.0; for (int i = 0; i < 20; i++) { double n = 2.0 * i + 1.0; vz += 1 / (n * n * n) * ( 1.0 - cosh(n* M_PI*x/h) / cosh(n*M_PI/2.0) )*sin(n * M_PI * y/ h); } vz = -dp / L / mu * 4.0 * h * h / M_PI / M_PI / M_PI * vz; return vz; } __global__ void particleTubeAdvect(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles, double h, double L, double dp, double mu) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const int tetID = d_tetIDs[particleID]; if (tetID < 0) {//tet=-1 // this particle left the domain p.w = false; return; } //Constant velocity, First-order Euler Integration vec4d& vel = d_vels[particleID]; vel.x = 0.0; vel.y = 0.0; vel.z = SquareDuct_analyticalVel(p.x, p.y, h, L, dp, mu); //printf("%lf %lf %lf->vel=%lf,%lf,%lf\n", p.x, p.y, p.z, vel.x, vel.y, vel.z); d_disp[particleID] = vec4d(vel.x * dt, vel.y * dt, vel.z * dt, -1.0); //if(particleID<5) // printf("(%lf,%lf,%lf) Vel=(%lf,%lf,%lf)\n", p.x, p.y, p.z, vel.x, vel.y, vel.z); } void advect::cudaTubeAdvect(Particle* d_particles, int* d_tetIDs, vec4d* d_vels, vec4d* d_disp, double dt, int numParticles) { double L = 30;//cm double h = 0.1;//cm double mu = 0.001072;//Pa s double dp = -4.904871302657455;//Pa double Q = 0.000536;//cm^3/s int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); particleTubeAdvect << <gridDims, blockDims >> > (d_particles, d_tetIDs, d_vels, d_disp, dt, numParticles, h, L, dp, mu); cudaCheck(cudaDeviceSynchronize()); //system("pause"); } //----------------------Brownian motion-------------------- #include <thrust/execution_policy.h> struct InitCURAND { unsigned long long seed; curandState_t* states; InitCURAND(unsigned long long _seed, curandState_t* _states) { seed = _seed; states = _states; } __device__ void operator()(unsigned int i) { curand_init(seed, i, 0, &states[i]); } }; void advect::initRandomGenerator(int numParticles, curandState_t* rand_states){ //Each particle has its own random generator for each thread long int rng_seed = time(NULL); rng_seed = 1591593751; printf("#adv: Random Seed=%d\n", rng_seed); thrust::counting_iterator<unsigned int> count(0); thrust::for_each(count, count + numParticles, InitCURAND(rng_seed, rand_states)); } //[Device] Constant initial velocity interpolation __global__ void particleBrownianMotion(Particle* d_particles, vec4d* d_disp, double D, curandState_t* states, double dt, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return; const double randDisp = sqrt(2.00 * D * dt); const double randXi0 = curand_normal_double(&states[particleID]); const double randXi1 = curand_normal_double(&states[particleID]); const double randXi2 = curand_normal_double(&states[particleID]); d_disp[particleID] += vec4d(randXi0, randXi1, randXi2, 0.0)*randDisp; //if (particleID == 170) //printf("%d D=%f dt=%f Random disp (%f,%f,%f)-(%f,%f,%f) Disp %lf\n", particleID, D,dt, // 
d_disp[particleID].x, d_disp[particleID].y, d_disp[particleID].z, // randXi0, randXi1, randXi2,randDisp); } void advect::cudaBrownianMotion(Particle* d_particles, vec4d* d_disp, curandState_t* states, double dt, int numParticles) {//Brownian motion //double diffusionCoef = 0.1;// kbT/(6*pi*mu*rp) um^2/s int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); diffusionCoef = 5.7e-6; particleBrownianMotion << <gridDims, blockDims >> > (d_particles, d_disp, diffusionCoef, states, dt, numParticles); cudaCheck(cudaDeviceSynchronize()); //system("pause"); } __global__ void checkParticles(int* d_triIDs, bool * isHit, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; //if (particleID == 52747) //printf("Check Pts %d %d OldStatus=%d\n",particleID, d_triIDs[particleID], isHit[particleID]); if (d_triIDs[particleID] == -1) return; isHit[particleID] = true; } //----------------------Move-------------------- __global__ void particleMoveKernel(Particle* d_particles, vec4d* d_vels, double dt, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; if (!p.w) return;//particle is out of domain const vec4d P = vec4d(p.x, p.y, p.z, p.w); const vec4d vel = d_vels[particleID]; const vec4d P_next = P + vel * dt; //if (particleID == 5) // printf("[Move] %d (%f,%f,%f)->(%f,%f,%f) @ Vel(%f,%f,%f)\n", particleID, // p.x, p.y, p.z, P_next.x, P_next.y, P_next.z, // vel.x, vel.y, vel.z); p.x = P_next.x; p.y = P_next.y; p.z = P_next.z; } //[Host] Move particles x1 = x0 + vel*dt and do specular reflection if hit the wall // d_tetIDs used to determine the status of a particle void advect::cudaMoveParticles(Particle* d_particles, vec4d* d_vels, double dt, int numParticles, int* d_tetIDs){ int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); //Move particles particleMoveKernel << <gridDims, blockDims >> > (d_particles, d_vels, dt, numParticles); cudaCheck(cudaDeviceSynchronize()); } __global__ void particleMoveKernel(Particle* d_particles, vec4d* d_disps, int* d_tetIDs, int numParticles) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; Particle& p = d_particles[particleID]; vec4d& disp = d_disps[particleID]; /* if (!p.w) { //if (particleID == 33216) //if (particleID == 52747 || particleID == 98550 || particleID == 83144) //printf("[Move] %d (%.15lf,%.15lf,%.15lf)->Disp(%.15f,%.15f,%.15f)\n", particleID, // p.x, p.y, p.z, // disp.x, disp.y, disp.z); } */ if (!p.w) return;//particle is out of domain /* const vec4d P = vec4d(p.x, p.y, p.z, p.w); const vec4d P_next = P + disp; if (particleID == 50606) { //if (particleID == 52747 || particleID == 98550 || particleID == 83144) printf("[Move] %d tetID=%d (%.15lf,%.15lf,%.15lf)->(%.15f,%.15f,%.15f) Disp(%.15f,%.15f,%.15f)\n", particleID, d_tetIDs[particleID], p.x, p.y, p.z, P_next.x, P_next.y, P_next.z, disp.x, disp.y, disp.z); } */ //Update location p.x += disp.x; p.y += disp.y; p.z += disp.z; //Reset disp for the next iteration disp.x = 0.0; disp.y = 0.0; disp.z = 0.0; //disp.w = 0.0; } void advect::cudaMoveParticles(Particle* d_particles, vec4d* d_disps, int numParticles, int* d_tetIDs) { int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); //Move particles particleMoveKernel << <gridDims, blockDims >> > (d_particles, d_disps, d_tetIDs,numParticles); cudaCheck(cudaDeviceSynchronize()); } 
//-------------------------Debug----------------------- __global__ void reportParticles(int* tetIDs, int numParticles, int TagID) { int particleID = threadIdx.x + blockDim.x * blockIdx.x; if (particleID >= numParticles) return; if (tetIDs[particleID] <= TagID) printf("--Particle [%d] TagID=%d\n", particleID, tetIDs[particleID]); } //[Host] Check particle status based on tetID void advect::cudaReportParticles(int numParticles, int* d_tetIDs) { thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(d_tetIDs); int blockDims = 128; int gridDims = divRoundUp(numParticles, blockDims); int NumBadParticles = thrust::count_if(thrust::device, dev_ptr, dev_ptr + numParticles, negative<int>()); printf("#adv: Out-of-domain particles(-tetID) = %d\n", NumBadParticles); if (NumBadParticles > 0) { reportParticles << <gridDims, blockDims >> > (d_tetIDs, numParticles, -1); cudaCheck(cudaDeviceSynchronize()); } } }
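For reference, the displacement added per step in particleBrownianMotion (in both copies above) is the standard Euler-Maruyama update for isotropic diffusion: each Cartesian component receives an independent standard-normal draw scaled by the diffusive step length,

\[
\Delta x_i = \sqrt{2 D\, \Delta t}\;\xi_i,\qquad \xi_i \sim \mathcal{N}(0,1),\qquad i \in \{x, y, z\},
\]

which is exactly randDisp = sqrt(2.0 * D * dt) multiplied by the three normal draws from the per-particle generator.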
fd129999216f447ef6a5f79e97b79b61f31b8c3d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <cassert> #include <cudaTools/diagnostic.h> __device__ float deviceValue; int main() { // Upload value -> deviceValue. float value = 3.14f; CUDA_CHECK( hipMemcpyToSymbol( deviceValue, &value, sizeof( float ) ) ); // Check deviceValue size. size_t deviceValueSize; CUDA_CHECK( hipGetSymbolSize( &deviceValueSize, deviceValue ) ); assert( deviceValueSize == 4 ); printf( "deviceValue size: %lu\n", deviceValueSize ); // Download deviceValue -> downloadedValue; float downloadedValue; CUDA_CHECK( hipMemcpyFromSymbol( &downloadedValue, deviceValue, sizeof( float ) ) ); printf( "Downloaded value: %f\n", downloadedValue ); }
fd129999216f447ef6a5f79e97b79b61f31b8c3d.cu
#include <stdio.h> #include <cassert> #include <cudaTools/diagnostic.h> __device__ float deviceValue; int main() { // Upload value -> deviceValue. float value = 3.14f; CUDA_CHECK( cudaMemcpyToSymbol( deviceValue, &value, sizeof( float ) ) ); // Check deviceValue size. size_t deviceValueSize; CUDA_CHECK( cudaGetSymbolSize( &deviceValueSize, deviceValue ) ); assert( deviceValueSize == 4 ); printf( "deviceValue size: %lu\n", deviceValueSize ); // Download deviceValue -> downloadedValue; float downloadedValue; CUDA_CHECK( cudaMemcpyFromSymbol( &downloadedValue, deviceValue, sizeof( float ) ) ); printf( "Downloaded value: %f\n", downloadedValue ); }
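If the raw device pointer behind a __device__ symbol is wanted instead of a host copy, cudaGetSymbolAddress (hipGetSymbolAddress in the HIP version) can be used; a minimal sketch, reusing value, deviceValue, and the CUDA_CHECK macro from the file above:

// Sketch only: fetch the device address of deviceValue, then write through it with an ordinary memcpy.
float* deviceValuePtr = nullptr;
CUDA_CHECK( cudaGetSymbolAddress( (void**)&deviceValuePtr, deviceValue ) );
CUDA_CHECK( cudaMemcpy( deviceValuePtr, &value, sizeof( float ), cudaMemcpyHostToDevice ) );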
97d4987aa6eacfc092219049994d0349a3b24674.hip
// !!! This is a file automatically generated by hipify!!! #include "createParticles.hpp" #include "rk4.hpp" #include <cmath> float GetGyroPhase(const C3<float> v_abp) { // alp is mostly in the x / r direction // bet is mostly z direction float alp = v_abp.c1; float bet = v_abp.c2; return atan2(alp, bet); } PRAGMA HOST DEVICE float maxwellian(float vx, float vy, float vz, float vTh) { float weight_x = 1.0 / (vTh * std::sqrt(physConstants::pi)) * ::exp(-::pow(vx, 2) / ::pow(vTh, 2)); float weight_y = 1.0 / (vTh * std::sqrt(physConstants::pi)) * ::exp(-::pow(vy, 2) / ::pow(vTh, 2)); float weight_z = 1.0 / (vTh * std::sqrt(physConstants::pi)) * ::exp(-::pow(vz, 2) / ::pow(vTh, 2)); return weight_x * weight_y * weight_z; } PRAGMA HOST DEVICE float get_vTh(const float _amu, const float _Z, const float _T_keV) { float m = _amu * physConstants::amu; float kT_joule = _T_keV * 1e3 * physConstants::e; // This may actually be E_keV so may need a 3/2 somewhere float vTh = std::sqrt(2.0 * kT_joule / m); return vTh; } PRAGMA HOST DEVICE C3<float> maxwellian_df0_dv(const C3<float> _v, const float _T_keV, const float _n_m3, const float _amu, const float _Z) { C3<float> df0_dv; float vTh = get_vTh(_amu, _Z, _T_keV); float _vx = _v.c1; float _vy = _v.c2; float _vz = _v.c3; // Get the 3 components of df0_dv at this point in velocity space float h = vTh / 1000.0; float vxL = _vx - h; float vxR = _vx + h; float fL = maxwellian(vxL, _vy, _vz, vTh); float fR = maxwellian(vxR, _vy, _vz, vTh); float _df0_dv = (-fL + fR) / (2 * h); df0_dv.c1 = _df0_dv * _n_m3; float vyL = _vy - h; float vyR = _vy + h; fL = maxwellian(_vx, vyL, _vz, vTh); fR = maxwellian(_vx, vyR, _vz, vTh); _df0_dv = (-fL + fR) / (2 * h); df0_dv.c2 = _df0_dv * _n_m3; float vzL = _vz - h; float vzR = _vz + h; fL = maxwellian(_vx, _vy, vzL, vTh); fR = maxwellian(_vx, _vy, vzR, vTh); _df0_dv = (-fL + fR) / (2 * h); df0_dv.c3 = _df0_dv * _n_m3; return df0_dv; } vector<CParticle> create_particles(float x, float amu, float Z, float T_keV, float n_m3, int nPx, int nPy, int nPz, int nThermal, float& dv, float *r, C3<float> *b0_CYL, int nR) { vector<CParticle> pList; int nP = nPx * nPy * nPz; pList.resize(nP); float vTh = get_vTh(amu, Z, T_keV); #if DEBUG_MAXWELLIAN >= 1 cout << "amu: " << amu << endl; cout << "Z: " << Z << endl; cout << "vTh: " << vTh << endl; #endif float vxRange = vTh * nThermal * 2; float vxMin = -vxRange / 2.0; float dvx = vxRange / (nPx - 1); float vyRange = vTh * nThermal * 2; float vyMin = -vyRange / 2.0; float dvy = vyRange / (nPy - 1); float vzRange = vTh * nThermal * 2; float vzMin = -vzRange / 2.0; float dvz = vzRange / (nPz - 1); dv = dvx * dvy * dvz; // Return the Jacobian (volume element for integration later) float TestIntegratedValue = 0; int cnt = 0; for (int i = 0; i < nPx; i++) { for (int j = 0; j < nPy; j++) { for (int k = 0; k < nPz; k++) { float thisvx = vxMin + i * dvx; float thisvy = vyMin + j * dvy; float thisvz = vzMin + k * dvz; float weight = maxwellian(thisvx, thisvy, thisvz, vTh) * n_m3; TestIntegratedValue += weight * dv; CParticle p(x, 0.0, 0.0, thisvx, thisvy, thisvz, amu, Z, weight, T_keV, n_m3); pList[cnt] = p; pList[cnt].number = cnt; pList[cnt].vTh = vTh; pList[cnt].d3v = dv; // Get vPar, vPer and mu for guiding center integration C3<float> thisV_XYZ(thisvx, thisvy, thisvz); int iStat = 0; C3<float> this_b0_CYL = kj_interp1D(x, r, b0_CYL, nR, iStat); if(iStat>0) { cout << "ERROR : Interpolation failure on b0_CYL" << endl; exit(1); } C3<float> this_b0_XYZ = rot_CYL_to_XYZ(0, this_b0_CYL, 1); float bMag = 
mag(this_b0_XYZ); float vMag = mag(thisV_XYZ); C3<float> thisV_abp = rot_XYZ_to_abp(thisV_XYZ, this_b0_XYZ, 0); pList[cnt].vPar = thisV_abp.c3; pList[cnt].vPer = std::sqrt(::pow(thisV_abp.c1, 2) + ::pow(thisV_abp.c2, 2)); pList[cnt].gyroPhase = GetGyroPhase(thisV_abp); pList[cnt].u = pList[cnt].m * ::pow(pList[cnt].vPer, 2) / (2.0 * bMag); #if GC_ORBITS > 0 // Update the starting point to be at the guiding center int nTGC = 40; float wc = std::abs(pList[cnt].q * bMag / pList[cnt].m); float dTGC = 2*physConstants::pi/wc/nTGC; CParticle pGC(pList[cnt]); float averageX=0; float averageY=0; float averageZ=0; int MoveStatus = 0; for(int iGC=0; iGC<nTGC; iGC++) { MoveStatus = rk4_move(pGC, dTGC, r, b0_CYL, nR); averageX += pGC.c1; averageY += pGC.c2; averageZ += pGC.c3; } averageX = averageX/nTGC; averageY = averageY/nTGC; averageZ = averageZ/nTGC; pList[cnt].c1 = averageX; pList[cnt].c2 = averageY; pList[cnt].c3 = averageZ; #endif #if DEBUG_MAXWELLIAN >= 2 cout << "ThisVx: " << thisvx << endl; cout << "ThisVy: " << thisvy << endl; cout << "ThisVz: " << thisvz << endl; cout << "b0_XYZ: " << this_b0_XYZ.c1 << ", " << this_b0_XYZ.c2 << ", " << this_b0_XYZ.c3 << endl; cout << "vMag: " << vMag << endl; cout << "vPer: " << pList[cnt].vPer << endl; cout << "vPar: " << pList[cnt].vPar << endl; cout << "u: " << pList[cnt].u << endl << endl; if (isnan(pList[cnt].u)) exit(1); if (vMag > 3e8) exit(1); #endif cnt++; } } } #if DEBUG_MAXWELLIAN >= 1 cout << "TestIntegratedValue: " << TestIntegratedValue << endl; #endif return pList; }
97d4987aa6eacfc092219049994d0349a3b24674.cu
#include "createParticles.hpp" #include "rk4.hpp" #include <cmath> float GetGyroPhase(const C3<float> v_abp) { // alp is mostly in the x / r direction // bet is mostly z direction float alp = v_abp.c1; float bet = v_abp.c2; return atan2(alp, bet); } PRAGMA HOST DEVICE float maxwellian(float vx, float vy, float vz, float vTh) { float weight_x = 1.0 / (vTh * std::sqrt(physConstants::pi)) * std::exp(-std::pow(vx, 2) / std::pow(vTh, 2)); float weight_y = 1.0 / (vTh * std::sqrt(physConstants::pi)) * std::exp(-std::pow(vy, 2) / std::pow(vTh, 2)); float weight_z = 1.0 / (vTh * std::sqrt(physConstants::pi)) * std::exp(-std::pow(vz, 2) / std::pow(vTh, 2)); return weight_x * weight_y * weight_z; } PRAGMA HOST DEVICE float get_vTh(const float _amu, const float _Z, const float _T_keV) { float m = _amu * physConstants::amu; float kT_joule = _T_keV * 1e3 * physConstants::e; // This may actually be E_keV so may need a 3/2 somewhere float vTh = std::sqrt(2.0 * kT_joule / m); return vTh; } PRAGMA HOST DEVICE C3<float> maxwellian_df0_dv(const C3<float> _v, const float _T_keV, const float _n_m3, const float _amu, const float _Z) { C3<float> df0_dv; float vTh = get_vTh(_amu, _Z, _T_keV); float _vx = _v.c1; float _vy = _v.c2; float _vz = _v.c3; // Get the 3 components of df0_dv at this point in velocity space float h = vTh / 1000.0; float vxL = _vx - h; float vxR = _vx + h; float fL = maxwellian(vxL, _vy, _vz, vTh); float fR = maxwellian(vxR, _vy, _vz, vTh); float _df0_dv = (-fL + fR) / (2 * h); df0_dv.c1 = _df0_dv * _n_m3; float vyL = _vy - h; float vyR = _vy + h; fL = maxwellian(_vx, vyL, _vz, vTh); fR = maxwellian(_vx, vyR, _vz, vTh); _df0_dv = (-fL + fR) / (2 * h); df0_dv.c2 = _df0_dv * _n_m3; float vzL = _vz - h; float vzR = _vz + h; fL = maxwellian(_vx, _vy, vzL, vTh); fR = maxwellian(_vx, _vy, vzR, vTh); _df0_dv = (-fL + fR) / (2 * h); df0_dv.c3 = _df0_dv * _n_m3; return df0_dv; } vector<CParticle> create_particles(float x, float amu, float Z, float T_keV, float n_m3, int nPx, int nPy, int nPz, int nThermal, float& dv, float *r, C3<float> *b0_CYL, int nR) { vector<CParticle> pList; int nP = nPx * nPy * nPz; pList.resize(nP); float vTh = get_vTh(amu, Z, T_keV); #if DEBUG_MAXWELLIAN >= 1 cout << "amu: " << amu << endl; cout << "Z: " << Z << endl; cout << "vTh: " << vTh << endl; #endif float vxRange = vTh * nThermal * 2; float vxMin = -vxRange / 2.0; float dvx = vxRange / (nPx - 1); float vyRange = vTh * nThermal * 2; float vyMin = -vyRange / 2.0; float dvy = vyRange / (nPy - 1); float vzRange = vTh * nThermal * 2; float vzMin = -vzRange / 2.0; float dvz = vzRange / (nPz - 1); dv = dvx * dvy * dvz; // Return the Jacobian (volume element for integration later) float TestIntegratedValue = 0; int cnt = 0; for (int i = 0; i < nPx; i++) { for (int j = 0; j < nPy; j++) { for (int k = 0; k < nPz; k++) { float thisvx = vxMin + i * dvx; float thisvy = vyMin + j * dvy; float thisvz = vzMin + k * dvz; float weight = maxwellian(thisvx, thisvy, thisvz, vTh) * n_m3; TestIntegratedValue += weight * dv; CParticle p(x, 0.0, 0.0, thisvx, thisvy, thisvz, amu, Z, weight, T_keV, n_m3); pList[cnt] = p; pList[cnt].number = cnt; pList[cnt].vTh = vTh; pList[cnt].d3v = dv; // Get vPar, vPer and mu for guiding center integration C3<float> thisV_XYZ(thisvx, thisvy, thisvz); int iStat = 0; C3<float> this_b0_CYL = kj_interp1D(x, r, b0_CYL, nR, iStat); if(iStat>0) { cout << "ERROR : Interpolation failure on b0_CYL" << endl; exit(1); } C3<float> this_b0_XYZ = rot_CYL_to_XYZ(0, this_b0_CYL, 1); float bMag = mag(this_b0_XYZ); float vMag = 
mag(thisV_XYZ); C3<float> thisV_abp = rot_XYZ_to_abp(thisV_XYZ, this_b0_XYZ, 0); pList[cnt].vPar = thisV_abp.c3; pList[cnt].vPer = std::sqrt(std::pow(thisV_abp.c1, 2) + std::pow(thisV_abp.c2, 2)); pList[cnt].gyroPhase = GetGyroPhase(thisV_abp); pList[cnt].u = pList[cnt].m * std::pow(pList[cnt].vPer, 2) / (2.0 * bMag); #if GC_ORBITS > 0 // Update the starting point to be at the guiding center int nTGC = 40; float wc = std::abs(pList[cnt].q * bMag / pList[cnt].m); float dTGC = 2*physConstants::pi/wc/nTGC; CParticle pGC(pList[cnt]); float averageX=0; float averageY=0; float averageZ=0; int MoveStatus = 0; for(int iGC=0; iGC<nTGC; iGC++) { MoveStatus = rk4_move(pGC, dTGC, r, b0_CYL, nR); averageX += pGC.c1; averageY += pGC.c2; averageZ += pGC.c3; } averageX = averageX/nTGC; averageY = averageY/nTGC; averageZ = averageZ/nTGC; pList[cnt].c1 = averageX; pList[cnt].c2 = averageY; pList[cnt].c3 = averageZ; #endif #if DEBUG_MAXWELLIAN >= 2 cout << "ThisVx: " << thisvx << endl; cout << "ThisVy: " << thisvy << endl; cout << "ThisVz: " << thisvz << endl; cout << "b0_XYZ: " << this_b0_XYZ.c1 << ", " << this_b0_XYZ.c2 << ", " << this_b0_XYZ.c3 << endl; cout << "vMag: " << vMag << endl; cout << "vPer: " << pList[cnt].vPer << endl; cout << "vPar: " << pList[cnt].vPar << endl; cout << "u: " << pList[cnt].u << endl << endl; if (isnan(pList[cnt].u)) exit(1); if (vMag > 3e8) exit(1); #endif cnt++; } } } #if DEBUG_MAXWELLIAN >= 1 cout << "TestIntegratedValue: " << TestIntegratedValue << endl; #endif return pList; }
ad27ef9922cf95f3f6c54d6cd8e58f2b18186027.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ void get_largest_value(short *vec, const int vec_length, int *max)
{
    for (int i = threadIdx.x; i < vec_length; i = i + blockDim.x) {
        atomicMax(max, vec[i]);
    }
}

__global__ void get_largest_value(int *vec, const int vec_length, int* max)
{
    for (int i = threadIdx.x; i < vec_length; i = i + blockDim.x) {
        atomicMax(max, vec[i]);
    }
}
ad27ef9922cf95f3f6c54d6cd8e58f2b18186027.cu
#include "includes.h"

__device__ void get_largest_value(short *vec, const int vec_length, int *max)
{
    for (int i = threadIdx.x; i < vec_length; i = i + blockDim.x) {
        atomicMax(max, vec[i]);
    }
}

__global__ void get_largest_value(int *vec, const int vec_length, int* max)
{
    for (int i = threadIdx.x; i < vec_length; i = i + blockDim.x) {
        atomicMax(max, vec[i]);
    }
}
bdd6aee7c4bd0710fed21fe8b4bab30691915623.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <string.h>
#include <hip/hip_runtime.h>

#include "hashiru_cuda.cuh"

// A GPU version of the toy hash function from earlier.
// Could be done in a more parallel manner, but at this
// time I just want it to work.
__device__ void cuda_hash(const char *in, const int len, char *out) {
    char c = 0;
    for(int i = 0; i < len; i++) {
        c += (char)in[i];
    }
    c = 97 + c % 26;
    out[0] = c;
    for(int i = 1; i < 32; i++) {
        out[i] = 'F';
    }
    out[32] = '\0';
}

__global__ void cudaCrackHashKernel(const char *dict, const int max_length,
                                    const int dict_size, const char *to_crack,
                                    int *correct_idx) {
    // Calculate this thread's index.
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Set aside some memory for the GPU hash function to write
    // to. Suboptimal, but it works.
    char *current, *cur_hash = (char *)malloc(33 * sizeof(char));
    int equal, len;
    char *c;

    // Iterate over the whole dictionary.
    while(idx < dict_size) {
        // Get the current word for consideration.
        current = (char *) (dict + idx * (max_length + 1));

        // Calculate its length in a loop. Again, janky and not
        // parallel, but it works.
        len = 0;
        c = current;
        while(*c != '\0') {
            len++;
            c++;
        }

        // Nuke the hash buffer, and call the GPU hash function.
        memset(cur_hash, 0, 33);
        cuda_hash(current, len, cur_hash);

        // Super sketchy strcmp implementation. Not parallel
        // and not efficient, but hopefully it should work.
        equal = 1;
        for(int i = 0; i < 32; i++) {
            if(to_crack[i] != cur_hash[i]) equal = 0;
        }

        // Only if you stumble across the answer do you update
        // correct_idx. If a collision occurs, it only matters
        // that one of the correct answers gets written, not
        // which one.
        if(equal) {
            *correct_idx = idx;
            break;
        }

        idx += blockDim.x * gridDim.x;
    }
}

void cudaCallCrackHashKernel(const unsigned int blocks,
                             const unsigned int threadsPerBlock,
                             const char *dict, const int max_length,
                             const int dict_size, const char *to_crack,
                             int *correct_idx) {
    // Call the kernel with the appropriate parameters.
    hipLaunchKernelGGL(( cudaCrackHashKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, dict, max_length, dict_size, to_crack, correct_idx);
}
bdd6aee7c4bd0710fed21fe8b4bab30691915623.cu
#include <cstdio>
#include <string.h>
#include <cuda_runtime.h>

#include "hashiru_cuda.cuh"

// A GPU version of the toy hash function from earlier.
// Could be done in a more parallel manner, but at this
// time I just want it to work.
__device__ void cuda_hash(const char *in, const int len, char *out) {
    char c = 0;
    for(int i = 0; i < len; i++) {
        c += (char)in[i];
    }
    c = 97 + c % 26;
    out[0] = c;
    for(int i = 1; i < 32; i++) {
        out[i] = 'F';
    }
    out[32] = '\0';
}

__global__ void cudaCrackHashKernel(const char *dict, const int max_length,
                                    const int dict_size, const char *to_crack,
                                    int *correct_idx) {
    // Calculate this thread's index.
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Set aside some memory for the GPU hash function to write
    // to. Suboptimal, but it works.
    char *current, *cur_hash = (char *)malloc(33 * sizeof(char));
    int equal, len;
    char *c;

    // Iterate over the whole dictionary.
    while(idx < dict_size) {
        // Get the current word for consideration.
        current = (char *) (dict + idx * (max_length + 1));

        // Calculate its length in a loop. Again, janky and not
        // parallel, but it works.
        len = 0;
        c = current;
        while(*c != '\0') {
            len++;
            c++;
        }

        // Nuke the hash buffer, and call the GPU hash function.
        memset(cur_hash, 0, 33);
        cuda_hash(current, len, cur_hash);

        // Super sketchy strcmp implementation. Not parallel
        // and not efficient, but hopefully it should work.
        equal = 1;
        for(int i = 0; i < 32; i++) {
            if(to_crack[i] != cur_hash[i]) equal = 0;
        }

        // Only if you stumble across the answer do you update
        // correct_idx. If a collision occurs, it only matters
        // that one of the correct answers gets written, not
        // which one.
        if(equal) {
            *correct_idx = idx;
            break;
        }

        idx += blockDim.x * gridDim.x;
    }
}

void cudaCallCrackHashKernel(const unsigned int blocks,
                             const unsigned int threadsPerBlock,
                             const char *dict, const int max_length,
                             const int dict_size, const char *to_crack,
                             int *correct_idx) {
    // Call the kernel with the appropriate parameters.
    cudaCrackHashKernel<<<blocks, threadsPerBlock>>>(dict, max_length, dict_size, to_crack, correct_idx);
}
3bb5f8dc1dba1ca7aa2441394b9d8a79e19eaafb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>

#define SIZE 10

__global__ void VectorMatrixMult(int a[], int b[], int c[], int n)
{
    int i = threadIdx.x;

    if(i < n){
        //j is used for iterating through columns
        for(int j=0; j<SIZE; j++){
            c[i] += (a[j] * *(b + i*SIZE + j));
        }
    }
}

int main()
{
    int *a, *b, *c;
    clock_t t;

    //vector
    a = (int*)malloc(SIZE * sizeof(int));
    //matrix
    b = (int*)malloc(SIZE * SIZE * sizeof(int));
    //result
    c = (int*)malloc(SIZE * sizeof(int));

    for (int i = 0; i < SIZE; i++){
        a[i] = i+1;
        for (int j = 0; j < SIZE; j++){
            *(b + i*SIZE + j) = i*j;
            //int index=blockDim.x * blockIdx.x + threadIdx.x;
        }
    }

    int *d_a, *d_b, *d_c;
    int size = SIZE * sizeof(int);
    int size2d = SIZE * SIZE * sizeof(int);

    hipMalloc(&d_a, size);
    hipMalloc(&d_b, size2d);
    hipMalloc(&d_c, size);

    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size2d, hipMemcpyHostToDevice);
    // The kernel accumulates into c[i] with +=, so the result buffer must start at zero.
    hipMemset(d_c, 0, size);

    t = clock();
    hipLaunchKernelGGL(( VectorMatrixMult) , dim3(1), dim3(SIZE) , 0, 0, d_a, d_b, d_c, SIZE);
    hipDeviceSynchronize();
    t = clock() - t;
    // Elapsed kernel time in seconds.
    double t2 = ((double)t / CLOCKS_PER_SEC);

    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    printf("Vector: \n");
    for (int i = 0; i < SIZE; i++){
        printf("%d ", a[i]);
    }
    printf("\n");

    printf("Matrix: \n");
    for (int i = 0; i < SIZE; i++){
        for (int j = 0; j < SIZE; j++){
            printf("%d ", *(b + i*SIZE + j));
        }
        printf("\n");
    }

    printf("Product: \n");
    for (int i = 0; i < SIZE; i++){
        printf("%d ", c[i]);
    }
    printf("\n");

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    free(a);
    free(b);
    free(c);

    return 0;
}
3bb5f8dc1dba1ca7aa2441394b9d8a79e19eaafb.cu
#include <stdio.h>
#include <time.h>

#define SIZE 10

__global__ void VectorMatrixMult(int a[], int b[], int c[], int n)
{
    int i = threadIdx.x;

    if(i < n){
        //j is used for iterating through columns
        for(int j=0; j<SIZE; j++){
            c[i] += (a[j] * *(b + i*SIZE + j));
        }
    }
}

int main()
{
    int *a, *b, *c;
    clock_t t;

    //vector
    a = (int*)malloc(SIZE * sizeof(int));
    //matrix
    b = (int*)malloc(SIZE * SIZE * sizeof(int));
    //result
    c = (int*)malloc(SIZE * sizeof(int));

    for (int i = 0; i < SIZE; i++){
        a[i] = i+1;
        for (int j = 0; j < SIZE; j++){
            *(b + i*SIZE + j) = i*j;
            //int index=blockDim.x * blockIdx.x + threadIdx.x;
        }
    }

    int *d_a, *d_b, *d_c;
    int size = SIZE * sizeof(int);
    int size2d = SIZE * SIZE * sizeof(int);

    cudaMalloc(&d_a, size);
    cudaMalloc(&d_b, size2d);
    cudaMalloc(&d_c, size);

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size2d, cudaMemcpyHostToDevice);
    // The kernel accumulates into c[i] with +=, so the result buffer must start at zero.
    cudaMemset(d_c, 0, size);

    t = clock();
    VectorMatrixMult <<< 1, SIZE >>> (d_a, d_b, d_c, SIZE);
    cudaDeviceSynchronize();
    t = clock() - t;
    // Elapsed kernel time in seconds.
    double t2 = ((double)t / CLOCKS_PER_SEC);

    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("Vector: \n");
    for (int i = 0; i < SIZE; i++){
        printf("%d ", a[i]);
    }
    printf("\n");

    printf("Matrix: \n");
    for (int i = 0; i < SIZE; i++){
        for (int j = 0; j < SIZE; j++){
            printf("%d ", *(b + i*SIZE + j));
        }
        printf("\n");
    }

    printf("Product: \n");
    for (int i = 0; i < SIZE; i++){
        printf("%d ", c[i]);
    }
    printf("\n");

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    free(a);
    free(b);
    free(c);

    return 0;
}
f802d8a58e3b8cc99fefb3715c05eec2937feab3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper //#include <cutil.h> //#include <helper_cuda.h> #include <helper_timer.h> #include <iostream> #include <fstream> /* * Options * */ #define GAMMA 1.4f #define iterations 2000 // #ifndef block_length // #define block_length 192 // #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f /* * not options */ #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE_0 RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE_0 RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_0 RD_WG_SIZE #else #define BLOCK_SIZE_0 192 #endif #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_1 RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_1 RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_1 RD_WG_SIZE #else #define BLOCK_SIZE_1 192 #endif #ifdef RD_WG_SIZE_2_0 #define BLOCK_SIZE_2 RD_WG_SIZE_2_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_2 RD_WG_SIZE_2 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_2 RD_WG_SIZE #else #define BLOCK_SIZE_2 192 #endif #ifdef RD_WG_SIZE_3_0 #define BLOCK_SIZE_3 RD_WG_SIZE_3_0 #elif defined(RD_WG_SIZE_3) #define BLOCK_SIZE_3 RD_WG_SIZE_3 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_3 RD_WG_SIZE #else #define BLOCK_SIZE_3 192 #endif #ifdef RD_WG_SIZE_4_0 #define BLOCK_SIZE_4 RD_WG_SIZE_4_0 #elif defined(RD_WG_SIZE_4) #define BLOCK_SIZE_4 RD_WG_SIZE_4 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_4 RD_WG_SIZE #else #define BLOCK_SIZE_4 192 #endif // #if block_length > 128 // #warning "the kernels may fail too launch on some systems if the block length is too large" // #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; //checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N)); hipMalloc((void**)&t, sizeof(T)*N); return t; } template <typename T> void dealloc(T* array) { //checkCudaErrors(hipFree((void*)array)); hipFree((void*)array); } template <typename T> void copy(T* dst, T* src, int N) { //checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice)); hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice); } template <typename T> void upload(T* dst, T* src, int N) { //checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice)); hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice); } template <typename T> void download(T* dst, T* src, int N) { //checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost)); hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] 
h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1); hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); // getLastCudaError("initialize_variables failed"); } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); //getLastCudaError("compute_step_factor failed"); } /* * * */ __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); 
flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + 
VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes); //getLastCudaError("compute_flux failed"); } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); //getLastCudaError("update failed"); } /* * Main function */ int main(int argc, char** argv) { printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4); if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; hipDeviceProp_t prop; int dev; //checkCudaErrors(hipSetDevice(0)); hipSetDevice(0); //checkCudaErrors(hipGetDevice(&dev)); hipGetDevice(&dev); //checkCudaErrors(hipGetDeviceProperties(&prop, dev)); hipGetDeviceProperties(&prop, dev); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 
h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu //checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)); //checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) ); hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)); //checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) ); hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)); //checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) ); hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)); //checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) ); hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)); } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; { std::ifstream file(data_file_name); file >> nel; nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ ::min(1, nel % BLOCK_SIZE_0)); float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions float* variables = alloc<float>(nelr*NVAR); initialize_variables(nelr, variables); float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); 
initialize_variables(nelr, fluxes); hipMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing hipDeviceSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "Starting..." << std::endl; StopWatchInterface *timer = 0; // unsigned int timer = 0; // CUT_SAFE_CALL( cutCreateTimer( &timer)); // CUT_SAFE_CALL( cutStartTimer( timer)); sdkCreateTimer(&timer); sdkStartTimer(&timer); // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); //getLastCudaError("compute_step_factor failed"); for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); //getLastCudaError("compute_flux failed"); time_step(j, nelr, old_variables, variables, step_factors, fluxes); //getLastCudaError("time_step failed"); } } hipDeviceSynchronize(); // CUT_SAFE_CALL( cutStopTimer(timer) ); sdkStopTimer(&timer); std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); std::cout << "Done..." << std::endl; return 0; }
f802d8a58e3b8cc99fefb3715c05eec2937feab3.cu
// Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper //#include <cutil.h> //#include <helper_cuda.h> #include <helper_timer.h> #include <iostream> #include <fstream> /* * Options * */ #define GAMMA 1.4f #define iterations 2000 // #ifndef block_length // #define block_length 192 // #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f /* * not options */ #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE_0 RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE_0 RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_0 RD_WG_SIZE #else #define BLOCK_SIZE_0 192 #endif #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_1 RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_1 RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_1 RD_WG_SIZE #else #define BLOCK_SIZE_1 192 #endif #ifdef RD_WG_SIZE_2_0 #define BLOCK_SIZE_2 RD_WG_SIZE_2_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_2 RD_WG_SIZE_2 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_2 RD_WG_SIZE #else #define BLOCK_SIZE_2 192 #endif #ifdef RD_WG_SIZE_3_0 #define BLOCK_SIZE_3 RD_WG_SIZE_3_0 #elif defined(RD_WG_SIZE_3) #define BLOCK_SIZE_3 RD_WG_SIZE_3 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_3 RD_WG_SIZE #else #define BLOCK_SIZE_3 192 #endif #ifdef RD_WG_SIZE_4_0 #define BLOCK_SIZE_4 RD_WG_SIZE_4_0 #elif defined(RD_WG_SIZE_4) #define BLOCK_SIZE_4 RD_WG_SIZE_4 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_4 RD_WG_SIZE #else #define BLOCK_SIZE_4 192 #endif // #if block_length > 128 // #warning "the kernels may fail too launch on some systems if the block length is too large" // #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; //checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N)); cudaMalloc((void**)&t, sizeof(T)*N); return t; } template <typename T> void dealloc(T* array) { //checkCudaErrors(cudaFree((void*)array)); cudaFree((void*)array); } template <typename T> void copy(T* dst, T* src, int N) { //checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice)); cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice); } template <typename T> void upload(T* dst, T* src, int N) { //checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice)); cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice); } template <typename T> void download(T* dst, T* src, int N) { //checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost)); cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ 
__constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1); cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); // getLastCudaError("initialize_variables failed"); } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); //getLastCudaError("compute_step_factor failed"); } /* * * */ __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += 
factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; 
    fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
    fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
    fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
    fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}

void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
    dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
    cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes);
    //getLastCudaError("compute_flux failed");
}

__global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    float factor = step_factors[i]/float(RK+1-j);

    variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
    variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
    variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
    variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
    variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}

void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
    dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
    cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes);
    //getLastCudaError("update failed");
}

/*
 * Main function
 */
int main(int argc, char** argv)
{
    printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);

    if (argc < 2)
    {
        std::cout << "specify data file name" << std::endl;
        return 0;
    }
    const char* data_file_name = argv[1];

    cudaDeviceProp prop;
    int dev;

    //checkCudaErrors(cudaSetDevice(0));
    cudaSetDevice(0);
    //checkCudaErrors(cudaGetDevice(&dev));
    cudaGetDevice(&dev);
    //checkCudaErrors(cudaGetDeviceProperties(&prop, dev));
    cudaGetDeviceProperties(&prop, dev);

    printf("Name: %s\n", prop.name);

    // set far field conditions and load them into constant memory on the gpu
    {
        float h_ff_variable[NVAR];
        const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);

        h_ff_variable[VAR_DENSITY] = float(1.4);

        float ff_pressure = float(1.0f);
        float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
        float ff_speed = float(ff_mach)*ff_speed_of_sound;

        float3 ff_velocity;
        ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
        ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
        ff_velocity.z = 0.0f;

        h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
        h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
        h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;

        h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));

        float3 h_ff_momentum;
        h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
        h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
        h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
        float3 h_ff_flux_contribution_momentum_x;
        float3 h_ff_flux_contribution_momentum_y;
        float3 h_ff_flux_contribution_momentum_z;
        float3 h_ff_flux_contribution_density_energy;
        compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);

        // copy far field conditions to the gpu
        //checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
        cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float));
        //checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) );
        cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3));
        //checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) );
        cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3));
        //checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) );
        cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3));
        //checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) );
        cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3));
    }
    int nel;
    int nelr;

    // read in domain geometry
    float* areas;
    int* elements_surrounding_elements;
    float* normals;
    {
        std::ifstream file(data_file_name);

        file >> nel;
        nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ std::min(1, nel % BLOCK_SIZE_0));

        float* h_areas = new float[nelr];
        int* h_elements_surrounding_elements = new int[nelr*NNB];
        float* h_normals = new float[nelr*NDIM*NNB];

        // read in data
        for(int i = 0; i < nel; i++)
        {
            file >> h_areas[i];
            for(int j = 0; j < NNB; j++)
            {
                file >> h_elements_surrounding_elements[i + j*nelr];
                if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
                h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering

                for(int k = 0; k < NDIM; k++)
                {
                    file >> h_normals[i + (j + k*NNB)*nelr];
                    h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
                }
            }
        }

        // fill in remaining data
        int last = nel-1;
        for(int i = nel; i < nelr; i++)
        {
            h_areas[i] = h_areas[last];
            for(int j = 0; j < NNB; j++)
            {
                // duplicate the last element
                h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
                for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
            }
        }

        areas = alloc<float>(nelr);
        upload<float>(areas, h_areas, nelr);

        elements_surrounding_elements = alloc<int>(nelr*NNB);
        upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);

        normals = alloc<float>(nelr*NDIM*NNB);
        upload<float>(normals, h_normals, nelr*NDIM*NNB);

        delete[] h_areas;
        delete[] h_elements_surrounding_elements;
        delete[] h_normals;
    }

    // Create arrays and set initial conditions
    float* variables = alloc<float>(nelr*NVAR);
    initialize_variables(nelr, variables);

    float* old_variables = alloc<float>(nelr*NVAR);
    float* fluxes = alloc<float>(nelr*NVAR);
    float* step_factors = alloc<float>(nelr);

    // make sure all memory is floatly allocated before we start timing
    initialize_variables(nelr, old_variables);
    initialize_variables(nelr, fluxes);
    cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr );
    // make sure CUDA isn't still doing something before we start timing
    cudaThreadSynchronize();

    // these need to be computed the first time in order to compute time step
    std::cout << "Starting..." << std::endl;

    StopWatchInterface *timer = 0;
    // unsigned int timer = 0;
    // CUT_SAFE_CALL( cutCreateTimer( &timer));
    // CUT_SAFE_CALL( cutStartTimer( timer));
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);

    // Begin iterations
    for(int i = 0; i < iterations; i++)
    {
        copy<float>(old_variables, variables, nelr*NVAR);

        // for the first iteration we compute the time step
        compute_step_factor(nelr, variables, areas, step_factors);
        //getLastCudaError("compute_step_factor failed");

        for(int j = 0; j < RK; j++)
        {
            compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
            //getLastCudaError("compute_flux failed");
            time_step(j, nelr, old_variables, variables, step_factors, fluxes);
            //getLastCudaError("time_step failed");
        }
    }

    cudaThreadSynchronize();
    // CUT_SAFE_CALL( cutStopTimer(timer) );
    sdkStopTimer(&timer);

    std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;

    std::cout << "Saving solution..." << std::endl;
    dump(variables, nel, nelr);
    std::cout << "Saved solution..." << std::endl;

    std::cout << "Cleaning up..." << std::endl;
    dealloc<float>(areas);
    dealloc<int>(elements_surrounding_elements);
    dealloc<float>(normals);

    dealloc<float>(variables);
    dealloc<float>(old_variables);
    dealloc<float>(fluxes);
    dealloc<float>(step_factors);

    std::cout << "Done..." << std::endl;

    return 0;
}
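As the other rows in this dump show, hipify expresses triple-chevron launches like the two above with the hipLaunchKernelGGL macro. The following is a minimal sketch of that rewriting for the compute_flux and time_step wrappers, assuming the launch geometry stays unchanged; it is illustrative only and is not the verbatim hip_content of this row:

void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
    dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
    // hipify maps kernel<<<Dg,Db>>>(args...) onto this macro; the 0, 0 are shared-memory bytes and stream
    hipLaunchKernelGGL((cuda_compute_flux), dim3(Dg), dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes);
}

void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
    dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
    hipLaunchKernelGGL((cuda_time_step), dim3(Dg), dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
}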
e3a7440d7d4145934328bba5e2d4245dfa3cbdbc.hip
// !!! This is a file automatically generated by hipify!!!
//#define USE_LPSO
//#define USE_TAPSO
//#define USE_TPSO

#include "LPSO.cu"
#include "TPSO.cu"
#include "TAPSO.cu"
#include <iostream>

int main(int argc, char**argv)
{
    PSOBase *psoList[] = {
#ifdef USE_LPSO
        new LPSO<EvalBanana>(400, 2000), new LPSO<EvalBanana>(1200, 2000), new LPSO<EvalBanana>(2000, 2000), new LPSO<EvalBanana>(2800, 2000),
        // F1
        new LPSO<EvalF1>(400, 2000), new LPSO<EvalF1>(1200, 2000), new LPSO<EvalF1>(2000, 2000), new LPSO<EvalF1>(2800, 2000),
        // F2
        new LPSO<EvalF2>(400, 2000), new LPSO<EvalF2>(1200, 2000), new LPSO<EvalF2>(2000, 2000), new LPSO<EvalF2>(2800, 2000),
        // F3
        new LPSO<EvalF3>(400, 2000), new LPSO<EvalF3>(1200, 2000), new LPSO<EvalF3>(2000, 2000), new LPSO<EvalF3>(2800, 2000),
        // F4
        new LPSO<EvalF4>(400, 10000), new LPSO<EvalF4>(1200, 10000), new LPSO<EvalF4>(2000, 10000), new LPSO<EvalF4>(2800, 10000),
#elif defined USE_TAPSO
        new TAPSO<EvalBanana>(400, 2000), new TAPSO<EvalBanana>(1200, 2000), new TAPSO<EvalBanana>(2000, 2000), new TAPSO<EvalBanana>(2800, 2000),
        // F1
        new TAPSO<EvalF1>(400, 2000), new TAPSO<EvalF1>(1200, 2000), new TAPSO<EvalF1>(2000, 2000), new TAPSO<EvalF1>(2800, 2000),
        // F2
        new TAPSO<EvalF2>(400, 2000), new TAPSO<EvalF2>(1200, 2000), new TAPSO<EvalF2>(2000, 2000), new TAPSO<EvalF2>(2800, 2000),
        // F3
        new TAPSO<EvalF3>(400, 2000), new TAPSO<EvalF3>(1200, 2000), new TAPSO<EvalF3>(2000, 2000), new TAPSO<EvalF3>(2800, 2000),
        // F4
        new TAPSO<EvalF4>(400, 10000), new TAPSO<EvalF4>(1200, 10000), new TAPSO<EvalF4>(2000, 10000), new TAPSO<EvalF4>(2800, 10000),
#elif defined USE_TPSO
        new TPSO<EvalBanana>(400, 2000), new TPSO<EvalBanana>(1200, 2000), new TPSO<EvalBanana>(2000, 2000), new TPSO<EvalBanana>(2800, 2000),
        // F1
        new TPSO<EvalF1>(400, 2000), new TPSO<EvalF1>(1200, 2000), new TPSO<EvalF1>(2000, 2000), new TPSO<EvalF1>(2800, 2000),
        // F2
        new TPSO<EvalF2>(400, 2000), new TPSO<EvalF2>(1200, 2000), new TPSO<EvalF2>(2000, 2000), new TPSO<EvalF2>(2800, 2000),
        // F3
        new TPSO<EvalF3>(400, 2000), new TPSO<EvalF3>(1200, 2000), new TPSO<EvalF3>(2000, 2000), new TPSO<EvalF3>(2800, 2000),
        // F4
        new TPSO<EvalF4>(400, 10000), new TPSO<EvalF4>(1200, 10000), new TPSO<EvalF4>(2000, 10000), new TPSO<EvalF4>(2800, 10000),
#else
        new TPSO<EvalBanana>(51200, 50),
#endif
    };

    for (int i = 0; i < sizeof(psoList) / sizeof(PSOBase *); ++i)
    {
        PSOBase &pso(*psoList[i]);
        cout << "Funcao objetivo: " << pso.GetName() << endl;
        pso.Init();
        pso.PrintHeader();
        while (true){
            hipEvent_t start, stop;
            float elapsedTime;
            hipEventCreate(&start);
            hipEventCreate(&stop);
            hipEventRecord(start,0);

            pso.Iterate();

            hipEventRecord(stop,0);
            hipEventSynchronize(stop);
            hipEventElapsedTime(&elapsedTime, start,stop);
            pso.PrintStatus(elapsedTime);
            cout << endl;
        }
    }

    while (true);
}
e3a7440d7d4145934328bba5e2d4245dfa3cbdbc.cu
//#define USE_LPSO
//#define USE_TAPSO
//#define USE_TPSO

#include "LPSO.cu"
#include "TPSO.cu"
#include "TAPSO.cu"
#include <iostream>

int main(int argc, char**argv)
{
    PSOBase *psoList[] = {
#ifdef USE_LPSO
        new LPSO<EvalBanana>(400, 2000), new LPSO<EvalBanana>(1200, 2000), new LPSO<EvalBanana>(2000, 2000), new LPSO<EvalBanana>(2800, 2000),
        // F1
        new LPSO<EvalF1>(400, 2000), new LPSO<EvalF1>(1200, 2000), new LPSO<EvalF1>(2000, 2000), new LPSO<EvalF1>(2800, 2000),
        // F2
        new LPSO<EvalF2>(400, 2000), new LPSO<EvalF2>(1200, 2000), new LPSO<EvalF2>(2000, 2000), new LPSO<EvalF2>(2800, 2000),
        // F3
        new LPSO<EvalF3>(400, 2000), new LPSO<EvalF3>(1200, 2000), new LPSO<EvalF3>(2000, 2000), new LPSO<EvalF3>(2800, 2000),
        // F4
        new LPSO<EvalF4>(400, 10000), new LPSO<EvalF4>(1200, 10000), new LPSO<EvalF4>(2000, 10000), new LPSO<EvalF4>(2800, 10000),
#elif defined USE_TAPSO
        new TAPSO<EvalBanana>(400, 2000), new TAPSO<EvalBanana>(1200, 2000), new TAPSO<EvalBanana>(2000, 2000), new TAPSO<EvalBanana>(2800, 2000),
        // F1
        new TAPSO<EvalF1>(400, 2000), new TAPSO<EvalF1>(1200, 2000), new TAPSO<EvalF1>(2000, 2000), new TAPSO<EvalF1>(2800, 2000),
        // F2
        new TAPSO<EvalF2>(400, 2000), new TAPSO<EvalF2>(1200, 2000), new TAPSO<EvalF2>(2000, 2000), new TAPSO<EvalF2>(2800, 2000),
        // F3
        new TAPSO<EvalF3>(400, 2000), new TAPSO<EvalF3>(1200, 2000), new TAPSO<EvalF3>(2000, 2000), new TAPSO<EvalF3>(2800, 2000),
        // F4
        new TAPSO<EvalF4>(400, 10000), new TAPSO<EvalF4>(1200, 10000), new TAPSO<EvalF4>(2000, 10000), new TAPSO<EvalF4>(2800, 10000),
#elif defined USE_TPSO
        new TPSO<EvalBanana>(400, 2000), new TPSO<EvalBanana>(1200, 2000), new TPSO<EvalBanana>(2000, 2000), new TPSO<EvalBanana>(2800, 2000),
        // F1
        new TPSO<EvalF1>(400, 2000), new TPSO<EvalF1>(1200, 2000), new TPSO<EvalF1>(2000, 2000), new TPSO<EvalF1>(2800, 2000),
        // F2
        new TPSO<EvalF2>(400, 2000), new TPSO<EvalF2>(1200, 2000), new TPSO<EvalF2>(2000, 2000), new TPSO<EvalF2>(2800, 2000),
        // F3
        new TPSO<EvalF3>(400, 2000), new TPSO<EvalF3>(1200, 2000), new TPSO<EvalF3>(2000, 2000), new TPSO<EvalF3>(2800, 2000),
        // F4
        new TPSO<EvalF4>(400, 10000), new TPSO<EvalF4>(1200, 10000), new TPSO<EvalF4>(2000, 10000), new TPSO<EvalF4>(2800, 10000),
#else
        new TPSO<EvalBanana>(51200, 50),
#endif
    };

    for (int i = 0; i < sizeof(psoList) / sizeof(PSOBase *); ++i)
    {
        PSOBase &pso(*psoList[i]);
        cout << "Funcao objetivo: " << pso.GetName() << endl;
        pso.Init();
        pso.PrintHeader();
        while (true){
            cudaEvent_t start, stop;
            float elapsedTime;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start,0);

            pso.Iterate();

            cudaEventRecord(stop,0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&elapsedTime, start,stop);
            pso.PrintStatus(elapsedTime);
            cout << endl;
        }
    }

    while (true);
}
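The .hip and .cu versions of this pair differ only in the event API prefix (hipEvent_t, hipEventCreate, hipEventRecord, hipEventSynchronize, hipEventElapsedTime versus their cuda counterparts). Purely as an illustration, the measurement repeated inline in the loop body could be factored into a helper like the hypothetical time_iteration below; the name and the factoring are not part of either file:

// Hypothetical helper (CUDA side): measure one pso.Iterate() call in milliseconds.
static float time_iteration(PSOBase &pso)
{
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);                        // mark the point before the work
    pso.Iterate();                                    // the work being measured
    cudaEventRecord(stop, 0);                         // mark the point after the work
    cudaEventSynchronize(stop);                       // block until the GPU has passed 'stop'
    cudaEventElapsedTime(&elapsedTime, start, stop);  // elapsed time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return elapsedTime;
}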
cb0ab0f87ae65f8bbd25083e2df4ad0fd889e6c1.hip
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
#include <typeinfo>
#include <random>
#include <stdint.h>
#include <string>
#include <fstream>
#include <sstream>

// CUBLAS GEMM API
#include <rocblas.h>

// Cutlass GEMM API
#include <cutlass/util/util.h>
#include <cutlass/gemm/dispatch_back.h>
#include <cutlass/gemm/epilogue_function.h>

// Dispatch routines to CUTLASS
#include "cutlass_dispatch_back.h"

using namespace std;
using namespace cutlass;

/**
 * Compute C = A.matmul(B), where A and B are dense, and only on the sparse support of C
**/
template <typename func_t,                               ///< Test function type
          gemm::tiling_strategy::kind_t TilingStrategy,
          matrix_transform_t::kind_t TransformA,         ///< Transformation op for matrix A
          matrix_transform_t::kind_t TransformB,         ///< Transformation op for matrix B
          typename value_t,                              ///< Multiplicand value type (matrices A and B)
          typename accum_t                               ///< Accumulator value type (matrix C and scalars)
          >
hipError_t back_full(value_t* A_data, value_t* B_data, accum_t* C_data, int2* C_blocks, long C_blocks_length,
                     int m,                              ///< Height of C in rows
                     int n,                              ///< Width of C in columns
                     int k)
{
    typedef gemm::gemm_policy<value_t, accum_t, TransformA, TransformB, TilingStrategy> block_task_back_policy_t;

    hipStream_t stream = 0;
    func_t func;

    hipError_t error = func(m, n, k, A_data, B_data, C_data, C_blocks, C_blocks_length, accum_t(1.0), accum_t(0.0), stream, false).result;

    return error;
}

/**
 * Compute C = A.matmul(B), where A and B are dense, and only on the sparse support of C
**/
template <matrix_transform_t::kind_t TransformA,         ///< Transformation op for matrix A
          matrix_transform_t::kind_t TransformB,         ///< Transformation op for matrix B
          typename value_t,                              ///< Multiplicand value type (matrices A and B)
          typename accum_t                               ///< Accumulator value type (matrix C and scalars)
          >
hipError_t back(value_t* A_data, value_t* B_data, accum_t* C_data, int2* C_blocks, long C_blocks_length,
                int m,                                   ///< Height of C in rows
                int n,                                   ///< Width of C in columns
                int k)
{
    hipError_t error = back_full<cutlass_gemm_dispatch_back<gemm::tiling_strategy::CustomBack, math_operation_class_t::scalar, TransformA, TransformB, value_t, accum_t>,
                                 gemm::tiling_strategy::CustomBack, TransformA, TransformB, value_t, accum_t>(A_data, B_data, C_data, C_blocks, C_blocks_length, m, n, k);
    return error;
}

typedef hipError_t (*back_t)(float* A_data, float* B_data, float* C_data, int2* C_blocks, long C_blocks_length, int m, int n, int k);

/**
 * matrix a must be of dimensions [m,k]
 * matrix b must be of dimensions [k,n]
 * if pytorch_contiguous_a is true, then dense_a must be contiguous, ortherwise dense_a.t() must be contiguous.
 * if pytorch_contiguous_b is true, then dense_b must be contiguous, ortherwise dense_b.t() must be contiguous.
**/
int blocksparse_matmul_back_cutlass(torch::Tensor dense_a, bool pytorch_contiguous_a,
                                    torch::Tensor dense_b, bool pytorch_contiguous_b,
                                    int m, int n, int k,
                                    int block_size_rows_b, int block_size_cols_b,
                                    torch::Tensor sparse_c, torch::Tensor sparse_blocks_c, long sparse_blocks_length_c)
{
    typedef float value_t;
    typedef float accum_t;

    value_t* A_data = (value_t*)dense_a.data_ptr();
    value_t* B_data = (value_t*)dense_b.data_ptr();
    value_t* C_data = (value_t*)sparse_c.data_ptr();
    int2* C_blocks = (int2*)sparse_blocks_c.data_ptr();
    long C_blocks_length = sparse_blocks_length_c;

    back_t back_fun;
    static const matrix_transform_t::kind_t NonTranspose = matrix_transform_t::NonTranspose;
    static const matrix_transform_t::kind_t Transpose = matrix_transform_t::Transpose;

    if (pytorch_contiguous_a) {
        if (pytorch_contiguous_b) {
            back_fun = back<Transpose, Transpose, value_t, accum_t>;
        } else {
            back_fun = back<Transpose, NonTranspose, value_t, accum_t>;
        }
    } else {
        if (pytorch_contiguous_b) {
            back_fun = back<NonTranspose, Transpose, value_t, accum_t>;
        } else {
            back_fun = back<NonTranspose, NonTranspose, value_t, accum_t>;
        }
    }

    return back_fun(A_data,B_data, C_data, C_blocks, C_blocks_length, m, n, k);
}
cb0ab0f87ae65f8bbd25083e2df4ad0fd889e6c1.cu
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include <typeinfo>
#include <random>
#include <stdint.h>
#include <string>
#include <fstream>
#include <sstream>

// CUBLAS GEMM API
#include <cublas_v2.h>

// Cutlass GEMM API
#include <cutlass/util/util.h>
#include <cutlass/gemm/dispatch_back.h>
#include <cutlass/gemm/epilogue_function.h>

// Dispatch routines to CUTLASS
#include "cutlass_dispatch_back.h"

using namespace std;
using namespace cutlass;

/**
 * Compute C = A.matmul(B), where A and B are dense, and only on the sparse support of C
**/
template <typename func_t,                               ///< Test function type
          gemm::tiling_strategy::kind_t TilingStrategy,
          matrix_transform_t::kind_t TransformA,         ///< Transformation op for matrix A
          matrix_transform_t::kind_t TransformB,         ///< Transformation op for matrix B
          typename value_t,                              ///< Multiplicand value type (matrices A and B)
          typename accum_t                               ///< Accumulator value type (matrix C and scalars)
          >
cudaError_t back_full(value_t* A_data, value_t* B_data, accum_t* C_data, int2* C_blocks, long C_blocks_length,
                      int m,                             ///< Height of C in rows
                      int n,                             ///< Width of C in columns
                      int k)
{
    typedef gemm::gemm_policy<value_t, accum_t, TransformA, TransformB, TilingStrategy> block_task_back_policy_t;

    cudaStream_t stream = 0;
    func_t func;

    cudaError_t error = func(m, n, k, A_data, B_data, C_data, C_blocks, C_blocks_length, accum_t(1.0), accum_t(0.0), stream, false).result;

    return error;
}

/**
 * Compute C = A.matmul(B), where A and B are dense, and only on the sparse support of C
**/
template <matrix_transform_t::kind_t TransformA,         ///< Transformation op for matrix A
          matrix_transform_t::kind_t TransformB,         ///< Transformation op for matrix B
          typename value_t,                              ///< Multiplicand value type (matrices A and B)
          typename accum_t                               ///< Accumulator value type (matrix C and scalars)
          >
cudaError_t back(value_t* A_data, value_t* B_data, accum_t* C_data, int2* C_blocks, long C_blocks_length,
                 int m,                                  ///< Height of C in rows
                 int n,                                  ///< Width of C in columns
                 int k)
{
    cudaError_t error = back_full<cutlass_gemm_dispatch_back<gemm::tiling_strategy::CustomBack, math_operation_class_t::scalar, TransformA, TransformB, value_t, accum_t>,
                                  gemm::tiling_strategy::CustomBack, TransformA, TransformB, value_t, accum_t>(A_data, B_data, C_data, C_blocks, C_blocks_length, m, n, k);
    return error;
}

typedef cudaError_t (*back_t)(float* A_data, float* B_data, float* C_data, int2* C_blocks, long C_blocks_length, int m, int n, int k);

/**
 * matrix a must be of dimensions [m,k]
 * matrix b must be of dimensions [k,n]
 * if pytorch_contiguous_a is true, then dense_a must be contiguous, ortherwise dense_a.t() must be contiguous.
 * if pytorch_contiguous_b is true, then dense_b must be contiguous, ortherwise dense_b.t() must be contiguous.
**/
int blocksparse_matmul_back_cutlass(torch::Tensor dense_a, bool pytorch_contiguous_a,
                                    torch::Tensor dense_b, bool pytorch_contiguous_b,
                                    int m, int n, int k,
                                    int block_size_rows_b, int block_size_cols_b,
                                    torch::Tensor sparse_c, torch::Tensor sparse_blocks_c, long sparse_blocks_length_c)
{
    typedef float value_t;
    typedef float accum_t;

    value_t* A_data = (value_t*)dense_a.data_ptr();
    value_t* B_data = (value_t*)dense_b.data_ptr();
    value_t* C_data = (value_t*)sparse_c.data_ptr();
    int2* C_blocks = (int2*)sparse_blocks_c.data_ptr();
    long C_blocks_length = sparse_blocks_length_c;

    back_t back_fun;
    static const matrix_transform_t::kind_t NonTranspose = matrix_transform_t::NonTranspose;
    static const matrix_transform_t::kind_t Transpose = matrix_transform_t::Transpose;

    if (pytorch_contiguous_a) {
        if (pytorch_contiguous_b) {
            back_fun = back<Transpose, Transpose, value_t, accum_t>;
        } else {
            back_fun = back<Transpose, NonTranspose, value_t, accum_t>;
        }
    } else {
        if (pytorch_contiguous_b) {
            back_fun = back<NonTranspose, Transpose, value_t, accum_t>;
        } else {
            back_fun = back<NonTranspose, NonTranspose, value_t, accum_t>;
        }
    }

    return back_fun(A_data,B_data, C_data, C_blocks, C_blocks_length, m, n, k);
}
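Neither version of this pair shows how blocksparse_matmul_back_cutlass is exposed to Python. A hypothetical pybind11 binding, where the module setup and docstring are assumptions rather than content of the repository, might look like this:

// Hypothetical binding sketch for the entry point defined above.
#include <torch/extension.h>

// forward declaration of the function implemented in the .cu/.hip file
int blocksparse_matmul_back_cutlass(torch::Tensor dense_a, bool pytorch_contiguous_a,
                                    torch::Tensor dense_b, bool pytorch_contiguous_b,
                                    int m, int n, int k,
                                    int block_size_rows_b, int block_size_cols_b,
                                    torch::Tensor sparse_c, torch::Tensor sparse_blocks_c,
                                    long sparse_blocks_length_c);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("blocksparse_matmul_back_cutlass", &blocksparse_matmul_back_cutlass,
          "C = A.matmul(B) evaluated only on the block-sparse support of C");
}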
c74547a476486aef445a7482413cbe51bffca5b2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void square(float * d_out , float * d_in){
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f*f;
}

int main (int argc , char **argv){
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    //generate the input array on the host
    float h_in[ARRAY_SIZE];
    for(int i = 0; i< ARRAY_SIZE ;i++){
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    //declare GPU memeory pointers
    float * d_in;
    float * d_out;

    //allocate GPU memory
    hipMalloc((void **) &d_in, ARRAY_BYTES);
    hipMalloc((void **) &d_out, ARRAY_BYTES );

    //transfer the array to GPU
    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);

    //launch the kernel
    hipLaunchKernelGGL(( square), dim3(1),dim3(ARRAY_SIZE), 0, 0, d_out,d_in);

    //copy back the result array to the cpu
    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    //print out the resulting array
    for(int i = 0; i<ARRAY_SIZE;i++){
        printf("%f",h_out[i]);
        printf(((i %4) != 3) ? "\t" : "\n");
    }

    // free GPU memory allocation
    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
c74547a476486aef445a7482413cbe51bffca5b2.cu
#include <stdio.h>

__global__ void square(float * d_out , float * d_in){
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f*f;
}

int main (int argc , char **argv){
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    //generate the input array on the host
    float h_in[ARRAY_SIZE];
    for(int i = 0; i< ARRAY_SIZE ;i++){
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    //declare GPU memeory pointers
    float * d_in;
    float * d_out;

    //allocate GPU memory
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES );

    //transfer the array to GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    //launch the kernel
    square<<<1,ARRAY_SIZE>>>(d_out,d_in);

    //copy back the result array to the cpu
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    //print out the resulting array
    for(int i = 0; i<ARRAY_SIZE;i++){
        printf("%f",h_out[i]);
        printf(((i %4) != 3) ? "\t" : "\n");
    }

    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
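This last pair is the smallest example in this section of the mechanical mapping hipify applies; the correspondences visible in it are summarised below, with both launch forms copied verbatim from the files above:

// CUDA launch form (c74547a476486aef445a7482413cbe51bffca5b2.cu):
//     square<<<1,ARRAY_SIZE>>>(d_out,d_in);
// HIP launch form emitted by hipify (c74547a476486aef445a7482413cbe51bffca5b2.hip):
//     hipLaunchKernelGGL(( square), dim3(1),dim3(ARRAY_SIZE), 0, 0, d_out,d_in);
// Runtime API renames in the same pair:
//     cudaMalloc -> hipMalloc        cudaMemcpy             -> hipMemcpy
//     cudaFree   -> hipFree          cudaMemcpyHostToDevice -> hipMemcpyHostToDevice
// plus the added #include "hip/hip_runtime.h" and the hipify banner comment at the top of the .hip file.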