【问题标题】:Copying array of pointers into device memory and back (CUDA)将指针数组复制到设备内存并返回(CUDA)
【发布时间】:2015-03-11 23:15:21
【问题描述】:

我正在尝试在我的玩具示例中使用 cublas 函数 cublasSgemmBatched。在本例中,我首先分配二维数组:大小为 [6][5] 的 h_AA, h_BB 和大小为 [6][1] 的 h_CC。之后我将它复制到设备上,执行cublasSgemmBatched 并尝试将数组d_CC 复制回主机数组h_CC。但是,我在设备到主机复制时遇到错误 (cudaErrorLaunchFailure),我不确定我是否将数组正确复制到设备中:

int main(){
    cublasHandle_t handle;
    cudaError_t cudaerr;
    cudaEvent_t start, stop;
    cublasStatus_t stat;
    const float alpha = 1.0f;
    const float beta = 0.0f;
    float **h_AA, **h_BB, **h_CC;
    h_AA = new float*[6];
    h_BB = new float*[6];
    h_CC = new float*[6];
    for (int i = 0; i < 6; i++){
        h_AA[i] = new float[5];
        h_BB[i] = new float[5];
        h_CC[i] = new float[1];
        for (int j = 0; j < 5; j++){
            h_AA[i][j] = j;
            h_BB[i][j] = j;
        }
        h_CC[i][0] = 1;
    }
    float **d_AA, **d_BB, **d_CC;
    cudaMalloc(&d_AA, 6 * sizeof(float*));
    cudaMalloc(&d_BB, 6 * sizeof(float*));
    cudaMalloc(&d_CC, 6 * sizeof(float*));
    cudaerr = cudaMemcpy(d_AA, h_AA, 6 * sizeof(float*), cudaMemcpyHostToDevice);
    cudaerr = cudaMemcpy(d_BB, h_BB, 6 * sizeof(float*), cudaMemcpyHostToDevice);
    cudaerr = cudaMemcpy(d_CC, h_CC, 6 * sizeof(float*), cudaMemcpyHostToDevice);
    stat = cublasCreate(&handle);
    stat = cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, 1, 5, &alpha,
             (const float**)d_AA, 1, (const float**)d_BB, 5, &beta, d_CC, 1, 6);
    cudaerr = cudaMemcpy(h_CC, d_CC, 6 * sizeof(float*), cudaMemcpyDeviceToHost);
    cublasDestroy(handle);
}

所以这段代码有效,但是最后一个cudaerr 返回cudaErrorLaunchFailure。我试图在Github 上遵循此示例代码。

谢谢

附:我不明白的是,sizeof(float*) 是什么以及cudaMalloc 如何知道每个数组需要多少内存(就像这里我只确定 1 维的大小)。

更新:我做到了!:

cublasHandle_t handle;
cudaError_t cudaerr;
cudaEvent_t start, stop;
cublasStatus_t stat;
const float alpha = 1.0f;
const float beta = 0.0f;

float *h_A = new float[5];
float *h_B = new float[5];
float *h_C = new float[6];
for (int i = 0; i < 5; i++)
{
    h_A[i] = i;
    h_B[i] = i;
}



float **h_AA, **h_BB, **h_CC;
h_AA = (float**)malloc(6* sizeof(float*));
h_BB = (float**)malloc(6 * sizeof(float*));
h_CC = (float**)malloc(6 * sizeof(float*));
for (int i = 0; i < 6; i++){
    cudaMalloc((void **)&h_AA[i], 5 * sizeof(float));
    cudaMalloc((void **)&h_BB[i], 5 * sizeof(float));
    cudaMalloc((void **)&h_CC[i], sizeof(float));
    cudaMemcpy(h_AA[i], h_A, 5 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(h_BB[i], h_B, 5 * sizeof(float), cudaMemcpyHostToDevice);
}
float **d_AA, **d_BB, **d_CC;
cudaMalloc(&d_AA, 6 * sizeof(float*));
cudaMalloc(&d_BB, 6 * sizeof(float*));
cudaMalloc(&d_CC, 6 * sizeof(float*));
cudaerr = cudaMemcpy(d_AA, h_AA, 6 * sizeof(float*), cudaMemcpyHostToDevice);
cudaerr = cudaMemcpy(d_BB, h_BB, 6 * sizeof(float*), cudaMemcpyHostToDevice);
cudaerr = cudaMemcpy(d_CC, h_CC, 6 * sizeof(float*), cudaMemcpyHostToDevice);
stat = cublasCreate(&handle);
    stat = cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, 1, 5, &alpha, 
             (const float**)d_AA, 1, (const float**)d_BB, 5, &beta, d_CC, 1, 6);
    cudaerr = cudaMemcpy(h_CC, d_CC, sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 6;i++)
        cudaMemcpy(h_C+i, h_CC[i], sizeof(float), cudaMemcpyDeviceToHost);
cublasDestroy(handle);

【问题讨论】:

  • 您传递的混乱数据导致批处理 gemm 调用启动的内核之一失败。作为一个异步错误,您可能在下一次 cuda 调用之前不会收到它的通知。你研究过批处理 cublas cuda sample code 吗?
  • 我没有,现在就做
  • 我做到了!谢谢。那么,我的理解是否正确:为了开展 2D 设备数组的业务,您应该创建指向设备数组的指针的主机数组,然后将该数组复制到 2D 设备数组内存。为了从 2D 设备数组中检索 2D 主机数组,您应该再次使用中间 2D 数组,它是指向设备数组的主机指针数组。我将工作代码发布到更新中
  • 是的,这是一个需要深度复制机制的示例,这类似于您想要将矩阵复制到设备并能够访问它直接使用双下标表示法。您所说的二维设备阵列仍然是线性/扁平阵列。它的“2D”或深拷贝方面出现是因为您有一个要传递给设备的这些数组的数组,这类似于传递双下标数组所需的深拷贝机制。你为什么不发布你的更新作为答案。可以自己回答问题

标签: arrays pointers cuda cublas


【解决方案1】:

所以,我找到了答案(感谢@Robert Crovella):为了创建device array of pointers to device arrays(用于批处理函数),首先应该创建host array of pointers to device arrays,然后将其复制到device array of pointers to device arrays。转回主机也是如此:应该使用中间host array of pointers to device arrays

cublasHandle_t handle;
cudaError_t cudaerr;
cudaEvent_t start, stop;
cublasStatus_t stat;
const float alpha = 1.0f;
const float beta = 0.0f;

float *h_A = new float[5];
float *h_B = new float[5];
float *h_C = new float[6];
for (int i = 0; i < 5; i++)
{
    h_A[i] = i;
    h_B[i] = i;
}



float **h_AA, **h_BB, **h_CC;
h_AA = (float**)malloc(6* sizeof(float*));
h_BB = (float**)malloc(6 * sizeof(float*));
h_CC = (float**)malloc(6 * sizeof(float*));
for (int i = 0; i < 6; i++){
    cudaMalloc((void **)&h_AA[i], 5 * sizeof(float));
    cudaMalloc((void **)&h_BB[i], 5 * sizeof(float));
    cudaMalloc((void **)&h_CC[i], sizeof(float));
    cudaMemcpy(h_AA[i], h_A, 5 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(h_BB[i], h_B, 5 * sizeof(float), cudaMemcpyHostToDevice);
}
float **d_AA, **d_BB, **d_CC;
cudaMalloc(&d_AA, 6 * sizeof(float*));
cudaMalloc(&d_BB, 6 * sizeof(float*));
cudaMalloc(&d_CC, 6 * sizeof(float*));
cudaerr = cudaMemcpy(d_AA, h_AA, 6 * sizeof(float*), cudaMemcpyHostToDevice);
cudaerr = cudaMemcpy(d_BB, h_BB, 6 * sizeof(float*), cudaMemcpyHostToDevice);
cudaerr = cudaMemcpy(d_CC, h_CC, 6 * sizeof(float*), cudaMemcpyHostToDevice);
stat = cublasCreate(&handle);
    stat = cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, 1, 5, &alpha, 
             (const float**)d_AA, 1, (const float**)d_BB, 5, &beta, d_CC, 1, 6);
    cudaerr = cudaMemcpy(h_CC, d_CC, sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 6;i++)
        cudaMemcpy(h_C+i, h_CC[i], sizeof(float), cudaMemcpyDeviceToHost);
cublasDestroy(handle);

【讨论】:

  • 我觉得应该是cudaMemcpy(h_CC, d_CC, 6*sizeof(float*), cudaMemcpyDeviceToHost);cudaMemcpy(d_CC, h_CC, 6*sizeof(float*), cudaMemcpyHostToDevice);比较一下是不是错字?
猜你喜欢
  • 1970-01-01
  • 1970-01-01
  • 2014-08-30
  • 2017-03-23
  • 1970-01-01
  • 2013-06-06
  • 1970-01-01
  • 2015-10-20
  • 2020-01-01
相关资源
最近更新 更多