Some pitfalls of intra-wavefront thread communication in HIP/CUDA
I have recently been writing an SpMV (sparse matrix-vector multiplication) kernel in HIP, and while implementing the algorithm I ran into a few issues with thread communication inside a wavefront/block. This post is a record of them.
Use __syncthreads carefully inside conditional statements
We all know that __syncthreads() can be used to synchronize the threads within a block.
On AMD GPUs (ROCm), __syncthreads is compiled into an s_barrier instruction (note: the linked page covers the AMD GPU GFX9 ISA), together with the necessary synchronization of global memory and LDS (shared memory) accesses.
Typically, the threads of a block all operate on LDS (e.g. each writes some data into it), and when a thread later needs data from LDS (e.g. reads it back out), a __syncthreads is placed before the data is used, to guarantee that every thread in the block has finished its earlier LDS operations.
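As a minimal sketch of this write-then-read pattern, consider the hypothetical kernel below (reverse_in_block is only an illustration, not part of the SpMV code that follows; it assumes the block is launched with exactly 256 threads):

__global__ void reverse_in_block(const int *in, int *out) {
  __shared__ int buf[256];                      // LDS (shared memory); assumes blockDim.x == 256
  const int tid = threadIdx.x;
  const int base = blockIdx.x * blockDim.x;
  buf[tid] = in[base + tid];                    // every thread writes its own slot
  __syncthreads();                              // all LDS writes in the block are visible after this point
  out[base + tid] = buf[blockDim.x - 1 - tid];  // safe to read a slot written by another thread
}

Each thread reads a slot that a different thread wrote, so without the barrier the read could observe stale or uninitialized data.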
With that pattern in mind, consider the following example code:
#include <stdio.h>
constexpr int THREADS_PER_BLOCK = 256;
constexpr int N = 8;
constexpr int VECTOR_SIZE = 4;
constexpr int REDUCE_SIZE = 8;
__global__ void test_kernel(int *x, int *y, int alpha) {
  const int g_tid = threadIdx.x + blockDim.x * blockIdx.x; // global thread id
  const int g_bid = blockIdx.x; // global block id
  const int tid_in_block = g_tid % blockDim.x;
  __shared__ int SH[THREADS_PER_BLOCK];
  constexpr int VECTOR_NUM = THREADS_PER_BLOCK / VECTOR_SIZE; // vectors in block
  const int g_vector_id = g_tid / VECTOR_SIZE;
  const int tid_in_vector = g_tid % VECTOR_SIZE;
  const int vec_id_in_block = tid_in_block / VECTOR_SIZE;
  __shared__ int lds_y[VECTOR_NUM];
  int K = 0;
  for (int i = 0; i < N; i++) {
    const int index = i * THREADS_PER_BLOCK + g_tid;
    SH[tid_in_block] = x[index];
    __syncthreads(); // label:sync1:
    // reduce in vector
    if (vec_id_in_block < THREADS_PER_BLOCK / REDUCE_SIZE) { // label1:
      int sum = 0;
      for (int j = 0; j < REDUCE_SIZE / VECTOR_SIZE; j++) { // label2:
        const int lds_index = vec_id_in_block * REDUCE_SIZE + tid_in_vector + j * VECTOR_SIZE;
        sum += SH[lds_index];
      }
      for (int j = VECTOR_SIZE >> 1; j > 0; j >>= 1) {
        sum += __shfl_down(sum, j, VECTOR_SIZE);
      }
      // stage the per-vector sum in LDS so that the later write to y is coalesced
      if (tid_in_vector == 0) { // label3:
        lds_y[vec_id_in_block] = sum;
      }
    // }
    __syncthreads(); // label:sync2: note that this barrier sits inside the conditional opened at label1
    // if (vec_id_in_block < THREADS_PER_BLOCK / REDUCE_SIZE) {
      if (tid_in_block < THREADS_PER_BLOCK / REDUCE_SIZE) { // label4:
        const int local_sum = lds_y[tid_in_block];
        y[K + tid_in_block] = alpha * local_sum;
      }
    }
    K += THREADS_PER_BLOCK / REDUCE_SIZE;
  }
}
int main() {
  constexpr int DATA_SIZE = THREADS_PER_BLOCK * N;
  int *hx = new int[DATA_SIZE];
  int *hy = new int[DATA_SIZE/REDUCE_SIZE];
  for (int i = 0; i < DATA_SIZE; i++) {
    hx[i] = i;
  }
  int *x = nullptr;
  int *y = nullptr;
  cudaMalloc(&x, DATA_SIZE * sizeof(int));
  cudaMalloc(&y, DATA_SIZE / REDUCE_SIZE * sizeof(int));
  cudaMemcpy(x, hx, DATA_SIZE * sizeof(int), cudaMemcpyHostToDevice);
  test_kernel<<<1, THREADS_PER_BLOCK>>>(x, y, 1);
  cudaDeviceSynchronize();
  cudaMemcpy(hy, y, DATA_SIZE / REDUCE_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < DATA_SIZE / REDUCE_SIZE; i++) {
    // let R <- REDUCE_SIZE;
    // hy[i] should be: R*(2*R*i+R-1)/2
    int R = REDUCE_SIZE;
    printf("%d\n", hy[i] == (R * (2 * R * i + R - 1) / 2));
  }
}
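For reference, the check at the end of main works out as follows. Writing R for REDUCE_SIZE, and given that hx[i] = i and alpha = 1, the value the kernel is intended to produce for hy[k] is the sum of the R consecutive integers starting at R*k:

hy[k] = (R*k) + (R*k + 1) + ... + (R*k + R - 1) = R*(2*R*k + R - 1) / 2

which is exactly the expression compared against in the printf loop.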