I was trying to write a GEMM program using __device__ variables instead of allocating the arrays dynamically with cudaMalloc, but it keeps failing with "GPUassert: an illegal memory access was encountered" at the cudaDeviceSynchronize() call near the end of main. The version using cudaMalloc worked fine (a rough sketch of it is included after the listing below).
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cmath>
#include <stdio.h>
#include <stdlib.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define M 3
#define N 3
#define K 3
using namespace std;
__device__ double A_dev[M * K];
__device__ double B_dev[K * N];
__device__ double C_dev[M * N];
__global__ void gemm(double* A, double* B, double* C, int m, int n, int k)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int i = x * n + y;
double sum = 0.0;
for (int j = 0; j < k; j++)
{
sum += A[x * k + j] * B[n * j + y];
}
C[i] = sum;
printf("The value is %f", C[i]);
}
int main(void)
{
double A_h[M * K];
double B_h[K * N];
double C_h[M * N];
for (int i = 0; i < M*K; i++)
{
A_h[i] = (double)i;
B_h[i] = (double)i;
C_h[i] = 0.0;
}
gpuErrchk(cudaMemcpyToSymbol(A_dev, A_h, M * K * sizeof(double), 0, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(B_dev, B_h, K * N * sizeof(double), 0, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(C_dev, C_h, M * N * sizeof(double), 0, cudaMemcpyHostToDevice));
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(3, 3, 1);
gemm <<<dimGrid, dimBlock >>> (A_dev, B_dev, C_dev, 3, 3, 3);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpyFromSymbol(C_h, C_dev, M * N * sizeof(double), 0, cudaMemcpyDeviceToHost));
return 0;
}
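For reference, this is roughly the cudaMalloc-based version that runs cleanly for me. It is reconstructed from memory, so treat it as a sketch; it uses the same pointer-taking gemm kernel as above (printf omitted):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

#define M 3
#define N 3
#define K 3

// Same pointer-taking kernel as in the listing above (device printf removed).
__global__ void gemm(double* A, double* B, double* C, int m, int n, int k)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    double sum = 0.0;
    for (int j = 0; j < k; j++)
        sum += A[x * k + j] * B[n * j + y];
    C[x * n + y] = sum;
}

int main(void)
{
    double A_h[M * K], B_h[K * N], C_h[M * N];
    for (int i = 0; i < M * K; i++) { A_h[i] = (double)i; B_h[i] = (double)i; C_h[i] = 0.0; }

    // Dynamically allocated device buffers instead of __device__ symbols.
    double *A_d, *B_d, *C_d;
    gpuErrchk(cudaMalloc((void**)&A_d, M * K * sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&B_d, K * N * sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&C_d, M * N * sizeof(double)));
    gpuErrchk(cudaMemcpy(A_d, A_h, M * K * sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(B_d, B_h, K * N * sizeof(double), cudaMemcpyHostToDevice));

    dim3 dimGrid(1, 1, 1);
    dim3 dimBlock(3, 3, 1);
    gemm<<<dimGrid, dimBlock>>>(A_d, B_d, C_d, 3, 3, 3);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(C_h, C_d, M * N * sizeof(double), cudaMemcpyDeviceToHost));
    for (int i = 0; i < M * N; i++) printf("C[%d] = %f\n", i, C_h[i]);

    gpuErrchk(cudaFree(A_d));
    gpuErrchk(cudaFree(B_d));
    gpuErrchk(cudaFree(C_d));
    return 0;
}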
__device__ variables are created at global (file) scope, and we do not pass them as kernel arguments; the kernel refers to them directly by name. Passing A_dev, B_dev and C_dev in the launch the way you would pass cudaMalloc'ed pointers does not give the kernel valid device addresses, so its loads and stores fault, and the error is reported at the next cudaDeviceSynchronize().

If you make the following changes to your code, it will run without error:
#include <iostream>
#include <cmath>
#include <stdio.h>
#include <stdlib.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

#define M 3
#define N 3
#define K 3

using namespace std;

__device__ double A_dev[M * K];
__device__ double B_dev[K * N];
__device__ double C_dev[M * N];

// The kernel no longer takes pointer arguments: it uses the __device__
// arrays directly by name.
__global__ void gemm(int m, int n, int k)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int i = x * n + y;
    double sum = 0.0;
    for (int j = 0; j < k; j++)
    {
        sum += A_dev[x * k + j] * B_dev[n * j + y];
    }
    C_dev[i] = sum;
    printf("The value is %f", C_dev[i]);
}

int main(void)
{
    double A_h[M * K];
    double B_h[K * N];
    double C_h[M * N];
    for (int i = 0; i < M * K; i++)
    {
        A_h[i] = (double)i;
        B_h[i] = (double)i;
        C_h[i] = 0.0;
    }
    gpuErrchk(cudaMemcpyToSymbol(A_dev, A_h, M * K * sizeof(double), 0, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpyToSymbol(B_dev, B_h, K * N * sizeof(double), 0, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpyToSymbol(C_dev, C_h, M * N * sizeof(double), 0, cudaMemcpyHostToDevice));
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    dim3 dimGrid(1, 1, 1);
    dim3 dimBlock(3, 3, 1);
    // Only the scalar sizes are passed; the device symbols are not.
    gemm<<<dimGrid, dimBlock>>>(3, 3, 3);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpyFromSymbol(C_h, C_dev, M * N * sizeof(double), 0, cudaMemcpyDeviceToHost));
    return 0;
}
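As an aside, if you wanted to keep the pointer-parameter kernel signature, the runtime API lets host code obtain a device pointer for a __device__ symbol via cudaGetSymbolAddress(), and that pointer can legally be passed to a kernel. A minimal sketch of just the launch portion, assuming the original pointer-taking gemm kernel and the rest of the program above:

    // Get valid device pointers to the __device__ symbols, then pass those
    // to the original gemm(double*, double*, double*, int, int, int) kernel.
    double *A_p = nullptr, *B_p = nullptr, *C_p = nullptr;
    gpuErrchk(cudaGetSymbolAddress((void**)&A_p, A_dev));
    gpuErrchk(cudaGetSymbolAddress((void**)&B_p, B_dev));
    gpuErrchk(cudaGetSymbolAddress((void**)&C_p, C_dev));
    gemm<<<dimGrid, dimBlock>>>(A_p, B_p, C_p, 3, 3, 3);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());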