Distributing matrix multiplication work across multiple GPUs in C with OpenACC and OpenMP


I am trying to use 3 OpenMP threads to distribute the work of multiplying two NxN matrices across 3 NVIDIA GPUs. (The matrix values get large, hence the long long data type.) However, I cannot figure out where to place the #pragma acc parallel loop. I have tried some of the examples from the NVIDIA PDFs that were shared, but with no luck. I know the innermost loop cannot be parallelized, but I would like each of the three threads to own one GPU and do a portion of the work. Note that the input and output matrices are defined as global variables because I kept running out of stack memory.

I have tried the code below, but the compile errors I get all point to line 75, which is the #pragma acc parallel loop line:

[test@server ~]pgcc -acc -mp -ta=tesla:cc60 -Minfo=all -o testGPU matrixMultiplyopenmp.c

PGC-S-0035-Syntax error: Recovery attempted by replacing keyword for by keyword barrier (matrixMultiplyopenmp.c: 75)

PGC-S-0035-Syntax error: Recovery attempted by replacing acc by keyword enum (matrixMultiplyopenmp.c: 76)

PGC-S-0036-Syntax error: Recovery attempted by inserting ';' before keyword for (matrixMultiplyopenmp.c: 77)

PGC/x86-64 Linux 18.10-1: compilation completed with severe errors

The function is:

void multiplyMatrix(long long int matrixA[SIZE][SIZE], long long int matrixB[SIZE][SIZE], long long int matrixProduct[SIZE][SIZE])
{
    // Get Nvidia device type
    acc_init(acc_device_nvidia);

    // Get Number of GPUs in system
    int num_gpus = acc_get_num_devices(acc_device_nvidia);

    //Set the number of OpenMP thread to the number of GPUs
    #pragma omp parallel num_threads(num_gpus)
    {
        //Get thread openMP number and set the GPU device to that number
        int threadNum = omp_get_thread_num();
        acc_set_device_num(threadNum, acc_device_nvidia);

        int row;
        int col;
        int key;

        #pragma omp for
        #pragma acc parallel loop
        for (row = 0; row < SIZE; row++)
            for (col = 0; col < SIZE; col++)
                for (key = 0; key < SIZE; key++)
                    matrixProduct[row][col] = matrixProduct[row][col] + (matrixA[row][key] * matrixB[key][col]);
    }
}
Tags: c, gpu, openmp, openacc, pgcc
1 Answer

As fisehara points out, you can't combine an OpenMP "for" loop with an OpenACC parallel loop on the same for loop. Instead, you need to manually decompose the work across the OpenMP threads. Example below.

Is there a reason you want to use multiple GPUs here? The matrix multiply will most likely fit on a single GPU, so there's no need to introduce the extra overhead of host-side parallelization.
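
For comparison, a single-GPU version of the same kernel needs no OpenMP at all. Below is a minimal sketch of that variant (my own illustration, not part of this answer; it assumes the same SIZE macro and long long matrices as above):

// Hypothetical single-GPU variant: one OpenACC parallel region owns the
// whole iteration space, so no host-side decomposition is needed.
void multiplyMatrixSingleGPU(long long int matrixA[SIZE][SIZE],
                             long long int matrixB[SIZE][SIZE],
                             long long int matrixProduct[SIZE][SIZE])
{
    #pragma acc parallel loop \
      copyin(matrixA[:SIZE][:SIZE], matrixB[:SIZE][:SIZE]) \
      copy(matrixProduct[:SIZE][:SIZE])
    for (int row = 0; row < SIZE; row++) {
        #pragma acc loop vector
        for (int col = 0; col < SIZE; col++) {
            long long int sum = 0;
            // The innermost loop stays sequential, as in the example below.
            for (int key = 0; key < SIZE; key++) {
                sum += matrixA[row][key] * matrixB[key][col];
            }
            matrixProduct[row][col] += sum;
        }
    }
}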

Also, I generally recommend using MPI + OpenACC for multi-GPU programming. Domain decomposition is a natural part of MPI, but is not inherent in OpenMP. In addition, MPI gives you a one-to-one relationship between host processes and accelerators, lets you scale beyond a single node, and lets you take advantage of CUDA-Aware MPI for direct GPU-to-GPU data transfers. For more information, search for "MPI OpenACC" and you will find several tutorials. Class #2 at https://developer.nvidia.com/openacc-advanced-course is a good resource.
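
For reference, the rank-to-GPU binding in an MPI + OpenACC program typically looks like the sketch below (illustrative only, not from this answer; the row-block decomposition itself would mirror the OpenMP version that follows):

#include <mpi.h>
#include <openacc.h>

// Sketch: bind each MPI rank to one GPU on its node, then let each rank
// run its OpenACC kernels on that device.
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Group the ranks that share a node so the node-local rank can be
    // used as the GPU index on that node.
    MPI_Comm local_comm;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank,
                        MPI_INFO_NULL, &local_comm);
    int local_rank;
    MPI_Comm_rank(local_comm, &local_rank);

    int num_gpus = acc_get_num_devices(acc_device_nvidia);
    acc_set_device_num(local_rank % num_gpus, acc_device_nvidia);

    // ... each rank computes its own block of rows here, exactly as the
    // OpenMP threads do in the example below ...

    MPI_Comm_free(&local_comm);
    MPI_Finalize();
    return 0;
}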

% cat test.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#ifdef _OPENACC
#include <openacc.h>
#endif

#define SIZE 130

void multiplyMatrix(long long int matrixA[SIZE][SIZE], long long int matrixB[SIZE][SIZE], long long int matrixProduct[SIZE][SIZE])
{

#ifdef _OPENACC
    // Get Nvidia device type
    acc_init(acc_device_nvidia);
    // Get Number of GPUs in system
    int num_gpus = acc_get_num_devices(acc_device_nvidia);
#else
    int num_gpus = omp_get_max_threads();
#endif
    if (SIZE<num_gpus) {
        num_gpus=SIZE;
    }
    printf("Num Threads: %d\n",num_gpus);

    //Set the number of OpenMP thread to the number of GPUs
    #pragma omp parallel num_threads(num_gpus)
    {
        //Get thread openMP number and set the GPU device to that number
        int threadNum = omp_get_thread_num();
#ifdef _OPENACC
        acc_set_device_num(threadNum, acc_device_nvidia);
        printf("THID %d using GPU: %d\n",threadNum,threadNum);
#endif
        int row;
        int col;
        int key;
        int start, end;
        int block_size;
        block_size = SIZE/num_gpus;
        start = threadNum*block_size;
        end = start+block_size;
        if (threadNum==(num_gpus-1)) {
           // add the residual to the last thread
           end = SIZE;
        }
        printf("THID: %d, Start: %d End: %d\n",threadNum,start,end-1);

        #pragma acc parallel loop \
          copy(matrixProduct[start:end-start][:SIZE]), \
          copyin(matrixA[start:end-start][:SIZE],matrixB[:SIZE][:SIZE])
        for (row = start; row < end; row++) {
            #pragma acc loop vector
            for (col = 0; col < SIZE; col++) {
                for (key = 0; key < SIZE; key++) {
                    matrixProduct[row][col] = matrixProduct[row][col] + (matrixA[row][key] * matrixB[key][col]);
        }}}
    }
}

int main() {
   long long int matrixA[SIZE][SIZE];
   long long int matrixB[SIZE][SIZE];
   long long int matrixProduct[SIZE][SIZE];
   int i,j;
   for(i=0;i<SIZE;++i) {
     for(j=0;j<SIZE;++j) {
        matrixA[i][j] = (i*SIZE)+j;
        matrixB[i][j] = (j*SIZE)+i;
        matrixProduct[i][j]=0;
     }
   }
   multiplyMatrix(matrixA,matrixB,matrixProduct);
   printf("Result:\n");
   for(i=0;i<SIZE;++i) {
      printf("%d: %ld %ld\n",i,matrixProduct[i][0],matrixProduct[i][SIZE-1]);
   }

}
% pgcc test.c -mp -ta=tesla -Minfo=accel,mp
multiplyMatrix:
     28, Parallel region activated
     49, Generating copyin(matrixB[:130][:])
         Generating copy(matrixProduct[start:end-start][:131])
         Generating copyin(matrixA[start:end-start][:131])
         Generating Tesla code
         52, #pragma acc loop gang /* blockIdx.x */
         54, #pragma acc loop vector(128) /* threadIdx.x */
         55, #pragma acc loop seq
     54, Loop is parallelizable
     55, Complex loop carried dependence of matrixA->,matrixProduct->,matrixB-> prevents parallelization
         Loop carried dependence of matrixProduct-> prevents parallelization
         Loop carried backward dependence of matrixProduct-> prevents vectorization
     59, Parallel region terminated
% a.out
Num Threads: 4
THID 0 using GPU: 0
THID: 0, Start: 0 End: 31
THID 1 using GPU: 1
THID: 1, Start: 32 End: 63
THID 3 using GPU: 3
THID: 3, Start: 96 End: 129
THID 2 using GPU: 2
THID: 2, Start: 64 End: 95
Result:
0: 723905 141340355
1: 1813955 425843405
2: 2904005 710346455
3: 3994055 994849505
...
126: 138070205 35988724655
127: 139160255 36273227705
128: 140250305 36557730755
129: 141340355 36842233805