Matrix multiplication using MPI_Scatter and MPI_Gather
I am new to MPI programming. I am trying to write a matrix multiplication program, and I went through the post MPI Matrix Multiplication with scatter gather about matrix multiplication using the scatter and gather routines. I tried modifying the code available in that post as follows...
#define N 4
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stddef.h>
#include "mpi.h"

void print_results(char *prompt, int a[N][N]);

int main(int argc, char *argv[])
{
    int i, j, k, rank, size, tag = 99, blksz, sum = 0;
    int a[N][N] = {{1,2,3,4},{5,6,7,8},{9,1,2,3},{4,5,6,7}};
    int b[N][N] = {{1,2,3,4},{5,6,7,8},{9,1,2,3},{4,5,6,7}};
    int c[N][N];
    int aa[N], cc[N];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    //scatter rows of first matrix to different processes
    MPI_Scatter(a, N*N/size, MPI_INT, aa, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);

    //broadcast second matrix to all processes
    MPI_Bcast(b, N*N, MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Barrier(MPI_COMM_WORLD);

    //perform vector multiplication by all processes
    for (i = 0; i < N; i++)
    {
        for (j = 0; j < N; j++)
        {
            sum = sum + aa[j] * b[i][j];
        }
        cc[i] = sum;
        sum = 0;
    }

    MPI_Gather(cc, N*N/size, MPI_INT, c, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    print_results("C = ", c);
}

void print_results(char *prompt, int a[N][N])
{
    int i, j;

    printf("\n\n%s\n", prompt);
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            printf(" %d", a[i][j]);
        }
        printf("\n");
    }
    printf("\n\n");
}
I ran the above program as

$ mpirun -np 4 ./a.out

and for the above program I get the following incorrect output:
C =
0 0 -562242168 32766
1 0 4197933 0
-562242176 32766 0 0
4197856 0 4196672 0
C =
0 0 -1064802792 32765
1 0 4197933 0
-1064802800 32765 0 0
4197856 0 4196672 0
C =
30 70 29 60
70 174 89 148
29 89 95 74
60 148 74 126
C =
0 0 -1845552920 32765
1 0 4197933 0
-1845552928 32765 0 0
4197856 0 4196672 0
I have the following doubts:
1. Why is the result matrix C printed by all processes? It is supposed to be printed only by the root process.
2. Why are the printed results incorrect?
Corrections and help in this regard will be appreciated.
The result matrix c is printed by all processes because every process executes the function void print_results(char *prompt, int a[N][N]). MPI_Gather fills c only at the root, so of the four matrices printed above, only the one gathered at rank 0 contains computed values; the other ranks print their uninitialized c, which is the stack garbage you see. Since you gather at the process with rank 0, add an if (rank == 0) statement before calling print_results(...).
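In isolation the guard is just (the complete program further down contains the same change):

if (rank == 0)
    print_results("C = ", c);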
Also, the results are incorrect because of faulty loop logic in:
for (j = 0; j < N; j++)
{
    sum = sum + aa[j] * b[i][j];
}
Since each process holds one row of a in aa, cc[i] must be the dot product of that row with column i of b, so this should be:
for (j = 0; j < N; j++)
{
    sum = sum + aa[j] * b[j][i];
}
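For example, rank 0 receives row 0 of a, so aa = {1,2,3,4}; column 0 of b is {1,5,9,4}, and cc[0] = 1*1 + 2*5 + 3*9 + 4*4 = 54, which matches the first entry of the corrected output below.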
There is also no need to broadcast b, because every process already has its own copy, and you can drop the MPI_Barrier() calls. The complete program becomes:
#define N 4
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stddef.h>
#include "mpi.h"

void print_results(char *prompt, int a[N][N]);

int main(int argc, char *argv[])
{
    int i, j, k, rank, size, tag = 99, blksz, sum = 0;
    int a[N][N] = {{1,2,3,4},{5,6,7,8},{9,1,2,3},{4,5,6,7}};
    int b[N][N] = {{1,2,3,4},{5,6,7,8},{9,1,2,3},{4,5,6,7}};
    int c[N][N];
    int aa[N], cc[N];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    //scatter rows of first matrix to different processes
    MPI_Scatter(a, N*N/size, MPI_INT, aa, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);

    //broadcast second matrix to all processes
    MPI_Bcast(b, N*N, MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Barrier(MPI_COMM_WORLD);

    //perform vector multiplication by all processes
    for (i = 0; i < N; i++)
    {
        for (j = 0; j < N; j++)
        {
            sum = sum + aa[j] * b[j][i]; //MISTAKE_WAS_HERE
        }
        cc[i] = sum;
        sum = 0;
    }

    MPI_Gather(cc, N*N/size, MPI_INT, c, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();

    if (rank == 0) //I_ADDED_THIS
        print_results("C = ", c);
}

void print_results(char *prompt, int a[N][N])
{
    int i, j;

    printf("\n\n%s\n", prompt);
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            printf(" %d", a[i][j]);
        }
        printf("\n");
    }
    printf("\n\n");
}
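Built and run the same way as before (assuming the usual mpicc compiler wrapper; matmul.c is a placeholder name for the source file):

mpicc matmul.c
mpirun -np 4 ./a.out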
Then the result is:
C =
54 37 47 57
130 93 119 145
44 41 56 71
111 79 101 123
The call to MPI_Finalize does not mean that all but one MPI process have terminated, the way threads join at the end of an OpenMP parallel region! In most MPI implementations, all processes execute the instructions both before MPI_Init and after MPI_Finalize. A good practice is to do nothing before MPI_Init and after MPI_Finalize.
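As a minimal end-to-end sketch of that advice (my restructuring, not code from the linked post), the redundant broadcast and barriers are dropped and the rank-0 print happens before MPI_Finalize. Like the original, it assumes N is divisible by the number of processes:

#define N 4
#include <stdio.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int i, j, rank, size, sum;
    int a[N][N] = {{1,2,3,4},{5,6,7,8},{9,1,2,3},{4,5,6,7}};
    int b[N][N] = {{1,2,3,4},{5,6,7,8},{9,1,2,3},{4,5,6,7}};
    int c[N][N];
    int aa[N], cc[N];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    //scatter one row of a to each process; b needs no broadcast,
    //because every process initialized its own copy above
    MPI_Scatter(a, N*N/size, MPI_INT, aa, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);

    //cc[i] = dot product of the local row of a with column i of b
    for (i = 0; i < N; i++)
    {
        sum = 0;
        for (j = 0; j < N; j++)
            sum += aa[j] * b[j][i];
        cc[i] = sum;
    }

    //collect one row of c from each process; only rank 0's c is filled
    MPI_Gather(cc, N*N/size, MPI_INT, c, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) //only the root prints...
    {
        printf("C =\n");
        for (i = 0; i < N; i++)
        {
            for (j = 0; j < N; j++)
                printf(" %d", c[i][j]);
            printf("\n");
        }
    }

    MPI_Finalize(); //...and nothing happens after this
    return 0;
}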