GET STARTED

GET INVOLVED

Authorization Required

Not a member? Register Now

I have a task, one point are calculated via 12 neighboring points

Scheme is attached

In the middle there is a point that need to calculate

Code looks very roughly like this:

long x = blockIdx.x * blockDim.x + threadIdx.x;

long y = blockIdx.y * blockDim.y + threadIdx.y;

data [x][y] = data [x][y] + data [x][y+1];

data [x][y] = data [x][y] + data [x-1][y+1] + data [x][y+1] + data [x+1][y+1];

data [x][y] = data [x][y] + data [x-2][y] + data [x-1][y] + data [x+1][y] + data [x+2][y] ;

data [x][y] = data [x][y] + data [x-1][y-1] + data [x][y-1] + data [x+1][y-1];

data [x][y] = data [x][y] + data [x][y-1];

I don't understand how to get data from neighboring cells, or is it impossible?

I know that GPU is not best to solve this, but I have to solve via GPU with loss of perfomance

I have a task, one point are calculated via 12 neighboring points

Scheme is attached

In the middle there is a point that need to calculate

Code looks very roughly like this:

long x = blockIdx.x * blockDim.x + threadIdx.x;

long y = blockIdx.y * blockDim.y + threadIdx.y;

data [x][y] = data [x][y] + data [x][y+1];

data [x][y] = data [x][y] + data [x-1][y+1] + data [x][y+1] + data [x+1][y+1];

data [x][y] = data [x][y] + data [x-2][y] + data [x-1][y] + data [x+1][y] + data [x+2][y] ;

data [x][y] = data [x][y] + data [x-1][y-1] + data [x][y-1] + data [x+1][y-1];

data [x][y] = data [x][y] + data [x][y-1];

I don't understand how to get data from neighboring cells, or is it impossible?

I know that GPU is not best to solve this, but I have to solve via GPU with loss of perfomance

The best way to solve this is using shared memory. Without shared memory each element is loaded 9 times from the memory. The other alternative is to put your problem as a sparse matrix and use cusparse library.

The best way to solve this is using shared memory. Without shared memory each element is loaded 9 times from the memory. The other alternative is to put your problem as a sparse matrix and use cusparse library.

The way with additional global array works! It's wonderful! code bellow

But doesn't work the way with shared memory, maybe I'm wrong somewhere, please look if you could

[code]

//it doesn't work, code with shared memory

__global__ void incKernel (float * data)

{

int xIndex = blockIdx.x * blockDim.x + threadIdx.x;

__shared__ float temp[256];

if (xIndex < 256)

{

temp[xIndex] = data[xIndex];

}

__syncthreads();

if ((xIndex > 0) && (xIndex < 256))

{

data [xIndex] = temp[xIndex-1] + temp[xIndex] + temp[xIndex+1];

}

}

float * getSourceHostArray(int sizeArray);

void check_for_error(const char *er_str);

int main( int argc, char * argv [] )

{

int sizeArray = 256;

int numBytes = sizeArray * sizeof ( float );

float *hostArray = getSourceHostArray(sizeArray);

float * deviceArray = NULL;

cudaMalloc ( (void**)&deviceArray, numBytes );

dim3 threads = dim3(32, 1);

dim3 blocks = dim3(sizeArray / threads.x, 1);

cudaMemcpy ( deviceArray, hostArray, numBytes, cudaMemcpyHostToDevice );

incKernel<<<blocks, threads>>>(deviceArray);

check_for_error("");

cudaMemcpy ( hostArray, deviceArray, numBytes, cudaMemcpyDeviceToHost );

for ( int i = 0; i < 256; i++ )

{

printf ( "hostArray[%d]= %f\n", i, hostArray [i] );

}

cudaFree (deviceArray);

delete hostArray;

printf("Succeed!!!");

getch();

return 0;

}

[/code]

[code]

//it works, code with additional global array

__global__ void incKernel (const float * src, float * dest)

{

int xIndex = blockIdx.x * blockDim.x + threadIdx.x;

if ((xIndex > 0) && (xIndex < 256))

{

dest [xIndex] = src[xIndex-1] + src[xIndex] + src[xIndex+1];

}

}

float * getSourceHostArray(int sizeArray);

void check_for_error(const char *er_str);

int main( int argc, char * argv [] )

{

int sizeArray = 256;

int numBytes = sizeArray * sizeof ( float );

float *hostArray = getSourceHostArray(sizeArray);

float * srcDeviceArray = NULL;

float * destDeviceArray = NULL;

cudaMalloc ( (void**)&srcDeviceArray, numBytes );

cudaMalloc ( (void**)&destDeviceArray, numBytes );

cudaMemcpy ( srcDeviceArray, hostArray, numBytes, cudaMemcpyHostToDevice );

cudaMemcpy ( destDeviceArray, hostArray, numBytes, cudaMemcpyHostToDevice );

dim3 threads = dim3(32, 1);

dim3 blocks = dim3(sizeArray / threads.x, 1);

incKernel<<<blocks, threads>>>(srcDeviceArray, destDeviceArray);

check_for_error("");

cudaMemcpy ( hostArray, destDeviceArray, numBytes, cudaMemcpyDeviceToHost );

for ( int i = 0; i < 256; i++ )

{

printf ( "hostArray[%d]= %f\n", i, hostArray [i] );

}

cudaFree (srcDeviceArray);

cudaFree (destDeviceArray);

delete hostArray;

printf("Succeed!!!");

getch();

return 0;

}

float * getSourceHostArray(int sizeArray)

{

float *array = new float [sizeArray];

for ( int i = 0; i < sizeArray; i++ )

{

array [i] = 1.0f;

}

return array;

}

[/code]

The way with additional global array works! It's wonderful! code bellow

But doesn't work the way with shared memory, maybe I'm wrong somewhere, please look if you could