Search code examples
cuda, gpu, managed-memory

How to specify GPU id for cudaMemAdviseSetPreferredLocation


I keep getting "invalid device ordinal" when trying to set the preferred location of managed memory to GPU #0:

CUDA_ERR_CHECK(cudaMemAdvise(deviceMemoryHeap.pool, size,
    cudaMemAdviseSetPreferredLocation, 0));

The only thing that works is cudaCpuDeviceId. So, how to specify the GPU id?

EDIT Adding a simple example:

// Abort with a diagnostic (error code, message, file, line) when a CUDA
// runtime call does not return cudaSuccess.
// NOTE: the do { } while (0) wrapper must NOT end with a semicolon here --
// the caller supplies it, so CUDA_ERR_CHECK(x); expands to exactly one
// statement and stays safe inside unbraced if/else bodies. The original
// trailing ';' defeated that idiom.
#define CUDA_ERR_CHECK(x)                                  \
    do { cudaError_t err = x; if (err != cudaSuccess) {    \
        fprintf(stderr, "CUDA error %d \"%s\" at %s:%d\n", \
        (int)err, cudaGetErrorString(err),                 \
        __FILE__, __LINE__);                               \
        exit(1);                                           \
    }} while (0)

#include <cstdio>
#include <cstdlib>

// Stores sizeof(T) into the first element of `out`.
// The example launches this with a single thread (<<<1, 1>>>), so no
// thread-index guard is needed here.
template<typename T>
__global__ void kernel(size_t* out)
{
    out[0] = sizeof(T);
}

int main()
{
    size_t size = 1024 * 1024 * 1024;

    size_t* managed = NULL;
    CUDA_ERR_CHECK(cudaMallocManaged(&managed, size, cudaMemAttachGlobal));

    // cudaMemAdvise accepts a GPU ordinal as the preferred location only
    // when that device reports a non-zero cudaDevAttrConcurrentManagedAccess.
    // Passing a GPU id unconditionally fails with "invalid device ordinal"
    // on devices without that capability (only cudaCpuDeviceId would work),
    // so query the attribute first and skip the advice when unsupported.
    int device = 0;
    CUDA_ERR_CHECK(cudaGetDevice(&device));
    int concurrentManagedAccess = 0;
    CUDA_ERR_CHECK(cudaDeviceGetAttribute(&concurrentManagedAccess,
        cudaDevAttrConcurrentManagedAccess, device));
    if (concurrentManagedAccess)
        CUDA_ERR_CHECK(cudaMemAdvise(managed, size,
            cudaMemAdviseSetPreferredLocation, device));

    kernel<double><<<1, 1>>>(managed);
    CUDA_ERR_CHECK(cudaGetLastError());     // catch launch-config errors
    CUDA_ERR_CHECK(cudaDeviceSynchronize()); // catch async execution errors

    CUDA_ERR_CHECK(cudaFree(managed));

    // Same launch against ordinary device memory for latency comparison.
    size_t* memory = NULL;
    CUDA_ERR_CHECK(cudaMalloc(&memory, size));
    kernel<double><<<1, 1>>>(memory);
    CUDA_ERR_CHECK(cudaGetLastError());
    CUDA_ERR_CHECK(cudaDeviceSynchronize());
    CUDA_ERR_CHECK(cudaFree(memory));

    return 0;
}

Throws an error:

$ make
nvcc -arch=sm_30 managed.cu -o managed
$ ./managed 
CUDA error 10 "invalid device ordinal" at managed.cu:24

CUDA 8.0

My goal is to get rid of the giant cudaLaunch call latency, which happens only in case of managed memory kernel launch:

cudaLaunch latency in case of argument in managed memory


Solution

  • The error seems to be originating from a missing device capability. As the CUDA documentation for the cudaMemAdvise function states:

    If device is a GPU, then it must have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess.

    You should call the following code to make sure that the device is OK for concurrent managed use:

    int device_id = 0, result = 0;
    cudaDeviceGetAttribute(&result, cudaDevAttrConcurrentManagedAccess, device_id);
    if (result) {
        // Safe to call cudaMemAdvise with device_id as the preferred location
    }