MPI C - Gather 2d Array Segments into One Global Array

I am trying to print a dynamically allocated 2D array from my master process, after it has received all of its components from all the other processes. By components I mean sub-arrays, or blocks.

I have made the code generic with respect to the number of processes. The diagram below shows how the blocks are arranged inside the full array; each block is handled by one process. For this example, however, let's assume I run the program with 12 processes (I actually have 8 cores), using the command:

mpiexec -n 12 ./gather2dArray

Here is the diagram, specifically for the 12-process scenario (reproduced below as a rough text sketch):
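          pcolumns = 4
        +----+----+----+----+
        |  0 |  1 |  2 |  3 |
prows   +----+----+----+----+
  = 3   |  4 |  5 |  6 |  7 |
        +----+----+----+----+
        |  8 |  9 | 10 | 11 |
        +----+----+----+----+

Each block is rows x columns = 4 x 6 cells, so the global array is 12 x 24.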

Jonathan's answer in this question helped me a great deal, but unfortunately I have not been able to fully achieve what I want.

I first create the blocks in every process; I name them grid. Each one is a dynamically allocated 2D array. I also create the global array (universe), which is visible only to the master process (#0).

Finally, I use MPI_Gatherv(...) to assemble all the sub-arrays into the global array, and then I display both the local arrays and the global array.

When I run the program with the command above, I get a segmentation fault as soon as I reach the MPI_Gatherv(...) call. I can't figure out what I'm doing wrong. The full (heavily commented) code is below:

EDIT

I have fixed some errors in the code. MPI_Gatherv() now partially succeeds: I can print the entire first row of the global array correctly (I checked the individual elements against each process and they always match). But when it gets to the second row I get garbage characters, and eventually a segmentation fault. I haven't been able to figure out what is going wrong there. Still working on it...

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <time.h>

void print2dCharArray(char** array, int rows, int columns);


int main(int argc, char** argv)
{
  int master = 0, np, rank;
  char version[10];
  char processorName[20];
  int strLen[10];

  // Initialize MPI environment
  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &np);
  if (np != 12) { MPI_Abort(MPI_COMM_WORLD,1); }
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // We need a different seed for each process
  srand(time(0) ^ (rank * 33 / 4));

  int nDims = 2;               // array dimensions
  int rows  = 4, columns  = 6; // rows and columns of each block
  int prows = 3, pcolumns = 4; // rows and columns of blocks. Each block is handled by 1 process
  double density = 0.5;        // fill probability for '#' cells (declaration missing from the original listing; 0.5 assumed)

  char** grid = malloc(rows * sizeof(char*));
  for (int i = 0; i < rows; i++)
    grid[i] = malloc(columns * sizeof(char));

  char** universe = NULL;           // Global array
  char* recvPtr;                    // Pointer to start of Global array
  int Rows = rows * prows;          // Global array rows
  int Columns = columns * pcolumns; // Global array columns
  int sizes[2];                     // No of elements in each dimension of the whole array
  int subSizes[2];                  // No of elements in each dimension of the subarray
  int startCoords[2];               // Starting coordinates of each subarray
  MPI_Datatype recvBlock, recvMagicBlock;

  if (rank == master){         // For the master's eyes only
    universe = malloc(Rows * sizeof(char*));
    for (int i = 0; i < Rows; i++)
      universe[i] = malloc(Columns * sizeof(char));

    // Create a subarray (a rectangular block) datatype from a regular, 2d array
    sizes[0] = Rows;
    sizes[1] = Columns;
    subSizes[0] = rows;
    subSizes[1] = columns;
    startCoords[0] = 0;
    startCoords[1] = 0;

    MPI_Type_create_subarray(nDims, sizes, subSizes, startCoords, MPI_ORDER_C, MPI_CHAR, &recvBlock);

    // Now modify the newly created datatype to fit our needs by giving it a new extent
    // (the lower bound stays 0). The resized type "ends" as soon as we reach the elements
    // occupied by the next block, i.e. every (columns) * sizeof(char) = 6 bytes.
    MPI_Type_create_resized(recvBlock, 0, columns * sizeof(char), &recvMagicBlock);

    MPI_Type_commit(&recvMagicBlock);
    recvPtr = &universe[0][0];
  }

  // populate arrays
  for (int y = 0; y < rows; y++){
    for (int x = 0; x < columns; x++){
      if (( (double) rand() / RAND_MAX) <= density)
        grid[y][x] = '#';
      else
        grid[y][x] = '.';
    }
  }


  // display local array
  for (int i = 0; i < np; i++){
    if (i == rank) {
      printf("\n[Rank] of [total]: No%d of %d\n", rank, np);
      print2dCharArray(grid, rows, columns);
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }


  /* MPI_Gathering.. */
  int recvCounts[np], displacements[np];

  // recvCounts: how many chunks of data each process has -- in units of blocks here --
  for (int i = 0; i < np; i++)
    recvCounts[i] = 1;

  // prows * pcolumns = np
  // displacements: displacement relative to global buffer (universe) at which to place the
  //                             incoming data block from process i -- in block extents! --
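  //                             e.g. rank 5 sits at (p_row = 1, p_column = 1), so its displacement is
  //                             1 + 1*(4*4) = 17 extents = 17*6 = 102 chars into universe,
  //                             i.e. its block starts at global row 4, column 6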
  int index = 0;
  for (int p_row = 0; p_row < prows; p_row++)
    for (int p_column = 0; p_column < pcolumns; p_column++)
      displacements[index++] = p_column  +  p_row * (rows * pcolumns);

  // MPI_Gatherv(...) is a collective routine
  // Gather the local arrays to the global array in the master process
  // send type: MPI_CHAR       (a char)
  // recv type: recvMagicBlock (a block)
  MPI_Gatherv(&grid[0][0], rows * columns, MPI_CHAR, //: parameters relevant to sender
          recvPtr, recvCounts, displacements, recvMagicBlock, master, //: parameters relevant to receiver
          MPI_COMM_WORLD);

  // display global array
  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == master){
    printf("\n---Global Array---\n");
    print2dCharArray(universe, Rows, Columns);
  }

  MPI_Finalize();
  return 0;
}


void print2dCharArray(char** array, int rows, int columns)
{
  int i, j;
  for (i = 0; i < rows; i++){
    for (j = 0; j < columns; j++){
      printf("%c ", array[i][j]);
    }
    printf("\n");
  }
  fflush(stdout);
}

Below is the output I get. No matter what I try, I cannot get past this. As you can see, the first row of the global array, built from the first 4 blocks (processes 0-3), prints correctly. When it moves on to the next row, we get garbage characters...

hostname@User:~/mpi$ mpiexec -n 12 ./gather2darray
MPICH Version:  3User
Processor name: User

[Rank] of [total]: No0 of 12
. . # . . # 
# . # # # . 
. . . # # . 
. . # . . . 

[Rank] of [total]: No1 of 12
. . # # . . 
. . . . # # 
. # . . # . 
. . # . . . 

[Rank] of [total]: No2 of 12
. # # # . # 
. # . . . . 
# # # . . . 
. . . # # . 

[Rank] of [total]: No3 of 12
. . # # # # 
. . # # . . 
# . # . # . 
. . . # . . 

[Rank] of [total]: No4 of 12
. # . . . # 
# . # . # . 
# . . . . . 
# . . . . . 

[Rank] of [total]: No5 of 12
# # . # # . 
# . . # # . 
. . . . # . 
. # # . . . 

[Rank] of [total]: No6 of 12
. . # # . # 
. . # . # . 
# . . . . . 
. . . # # # 

[Rank] of [total]: No7 of 12
# # . # # . 
. # # . . . 
. . . . . # 
. . . # # . 

[Rank] of [total]: No8 of 12
. # . . . . 
# . # . # . 
. . . # . # 
# . # # # . 

[Rank] of [total]: No9 of 12
. . . . . # 
. . # . . . 
. . # . . # 
. . # # . . 

[Rank] of [total]: No10 of 12
. . . . # . 
# . . . . . 
. . # # . . 
. . . # . # 

[Rank] of [total]: No11 of 12
. # . . # . 
. # . # # . 
. . . # . . 
. # . # . # 

---Global Array---
. . # . . # . . # # . . . # # # . # . . # # # # 
� � < *   � � e {   � � � � �       �  
   J                       









*** Error in `./gather2darray': double free or corruption (out): 0x0000000001e4c050 ***
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated
*** stack smashing detected ***: ./gather2darray terminated

===================================================================================
=   BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
=   PID 10979 RUNNING AT User
=   EXIT CODE: 139
=   CLEANING UP REMAINING PROCESSES
=   YOU CAN IGNORE THE BELOW CLEANUP MESSAGES
===================================================================================
YOUR APPLICATION TERMINATED WITH THE EXIT STRING: Segmentation fault (signal 11)
This typically refers to a problem with your application.
Please see the FAQ page for debugging suggestions

Any help would be greatly appreciated. Thanks in advance.

Your code is almost correct; you just forgot one important MPI principle. When you pass an array to an MPI function, MPI assumes its memory is allocated contiguously. So you have to change the way you allocate your 2D arrays. (With one malloc per row, &universe[0][0] points only at the first 24-char row, so the gather writes far past that buffer, which is why the first row looks right and everything after it is garbage.)
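In essence, the fix is the allocation pattern used in the corrected listing below: back each 2D array with one contiguous buffer and make the row pointers index into it, so that &grid[0][0] really is the start of rows * columns consecutive chars.

char*  pre_grid = malloc(rows * columns * sizeof(char)); // one contiguous backing buffer
char** grid     = malloc(rows * sizeof(char*));          // row pointers into that buffer
for (int i = 0; i < rows; i++)
  grid[i] = &pre_grid[i * columns];
// grid[y][x] still works as before, but the data now lives in a single
// rows*columns run, which is what MPI_Gatherv and the subarray type expect.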

  #include <stdio.h>
  #include <stdlib.h>
  #include <mpi.h>
  #include <time.h>

  void print2dCharArray(char** array, int rows, int columns);


  int main(int argc, char** argv)
  {
    int master = 0, np, rank;
    char version[10];
    char processorName[20];
    int strLen[10];

    // Initialize MPI environment
    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &np);
    if (np != 12) { MPI_Abort(MPI_COMM_WORLD,1); }
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // We need a different seed for each process
    srand(time(0) ^ (rank * 33 / 4));

    int nDims = 2;               // array dimensions
    int rows  = 4, columns  = 6; // rows and columns of each block
    int prows = 3, pcolumns = 4; // rows and columns of blocks. Each block is handled by 1 process

    char* pre_grid = (char*) malloc(rows * columns * sizeof(char));
    char** grid = (char**) malloc(rows * sizeof(char*));
    for (int i = 0; i < rows; i++)
      grid[i] = &(pre_grid[i * columns]);

    char** universe = NULL;           // Global array
    char* pre_universe = NULL;
    char* recvPtr;                    // Pointer to start of Global array
    int Rows = rows * prows;          // Global array rows
    int Columns = columns * pcolumns; // Global array columns
    int sizes[2];                     // No of elements in each dimension of the whole array
    int subSizes[2];                  // No of elements in each dimension of the subarray
    int startCoords[2];               // Starting coordinates of each subarray
    MPI_Datatype recvBlock, recvMagicBlock;

    if (rank == master){         // For the master's eyes only
  /*    universe = malloc(Rows * sizeof(char*));*/
  /*    for (int i = 0; i < Rows; i++)*/
  /*      universe[i] = malloc(Columns * sizeof(char));*/

      pre_universe = (char*) malloc(Rows * Columns * sizeof(char));
      universe = (char**) malloc(Rows * sizeof(char*));
      for (int i = 0; i < Rows; i++) {
          universe[i] = &(pre_universe[i * Columns]);
      }



      // Create a subarray (a rectangular block) datatype from a regular, 2d array
      sizes[0] = Rows;
      sizes[1] = Columns;
      subSizes[0] = rows;
      subSizes[1] = columns;
      startCoords[0] = 0;
      startCoords[1] = 0;

      MPI_Type_create_subarray(nDims, sizes, subSizes, startCoords, MPI_ORDER_C, MPI_CHAR, &recvBlock);

      // Now modify the newly created datatype to fit our needs by giving it a new extent
      // (the lower bound stays 0). The resized type "ends" as soon as we reach the elements
      // occupied by the next block, i.e. every (columns) * sizeof(char) = 6 bytes.
      MPI_Type_create_resized(recvBlock, 0, columns * sizeof(char), &recvMagicBlock);

      MPI_Type_commit(&recvMagicBlock);
      recvPtr = &universe[0][0];
    }

    // populate arrays
    for (int y = 0; y < rows; y++){
      for (int x = 0; x < columns; x++){
        grid[y][x] = rank + 65;
      }
    }


    // display local array
    for (int i = 0; i < np; i++){
      if (i == rank) {
        printf("\n[Rank] of [total]: No%d of %d\n", rank, np);
        print2dCharArray(grid, rows, columns);
      }
      MPI_Barrier(MPI_COMM_WORLD);
    }


    /* MPI_Gathering.. */
    int recvCounts[np], displacements[np];

    // recvCounts: how many chunks of data each process has -- in units of blocks here --
    for (int i = 0; i < np; i++)
      recvCounts[i] = 1;

    // prows * pcolumns = np
    // displacements: displacement relative to global buffer (universe) at which to place the
    //                             incoming data block from process i -- in block extents! --
    int index = 0;
    for (int p_row = 0; p_row < prows; p_row++)
      for (int p_column = 0; p_column < pcolumns; p_column++)
        displacements[index++] = p_column  +  p_row * (rows * pcolumns);

    // MPI_Gatherv(...) is a collective routine
    // Gather the local arrays to the global array in the master process
    // send type: MPI_CHAR       (a char)
    // recv type: recvMagicBlock (a block)
    MPI_Gatherv(&grid[0][0], rows * columns, MPI_CHAR, //: parameters relevant to sender
            recvPtr, recvCounts, displacements, recvMagicBlock, master, //: parameters relevant to receiver
            MPI_COMM_WORLD);

    // display global array
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == master){
      printf("\n---Global Array---\n");
      print2dCharArray(universe, Rows, Columns);
    }

    free(grid[0]);
    free(grid);
    if (rank == master) {
      free(universe[0]);
      free(universe);
      MPI_Type_free(&recvMagicBlock);
      MPI_Type_free(&recvBlock);
    }


    MPI_Finalize();
    return 0;
  }


  void print2dCharArray(char** array, int rows, int columns)
  {
    int i, j;
    for (i = 0; i < rows; i++){
      for (j = 0; j < columns; j++){
        printf("%c ", array[i][j]);
      }
      printf("\n");
    }
    fflush(stdout);
  }

Output:

---Global Array---
    A A A A A A B B B B B B C C C C C C D D D D D D 
    A A A A A A B B B B B B C C C C C C D D D D D D 
    A A A A A A B B B B B B C C C C C C D D D D D D 
    A A A A A A B B B B B B C C C C C C D D D D D D 
    E E E E E E F F F F F F G G G G G G H H H H H H 
    E E E E E E F F F F F F G G G G G G H H H H H H 
    E E E E E E F F F F F F G G G G G G H H H H H H 
    E E E E E E F F F F F F G G G G G G H H H H H H 
    I I I I I I J J J J J J K K K K K K L L L L L L 
    I I I I I I J J J J J J K K K K K K L L L L L L 
    I I I I I I J J J J J J K K K K K K L L L L L L 
    I I I I I I J J J J J J K K K K K K L L L L L L