MPI_Barrier
MPI_Barrier synchronizes all processes in a communicator. A process that calls it blocks until every process in the communicator has also called it; only then do all of them continue execution.
MPI_METHOD
MPI_Barrier(
_In_ MPI_Comm comm // The communicator to synchronize.
);
Code example
#include "stdlib.h"
#include "stdio.h"
#include "mpi.h"
#include "time.h"
#include "windows.h"
int main(int args, char **argv){
MPI_Init(&args, &argv);
int process_id;
MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
int process_num;
MPI_Comm_size(MPI_COMM_WORLD, &process_num);
srand(process_id * time(NULL));
long long int t_start = GetTickCount();
int sleep_time = rand() % 4000;
Sleep(sleep_time);
printf("process %d sleeped %dms.", process_id, sleep_time);
MPI_Barrier(MPI_COMM_WORLD);
long long int t_end = GetTickCount();
printf(" process %d finished in %lld ms", process_id, t_end - t_start);
MPI_Finalize();
}
"C:\Program Files\Microsoft MPI\Bin\mpiexec.exe" -np 5 ./MPI_Synchronization_Points
process 3 slept 2559ms. process 3 finished in 3547 ms
process 1 slept 3545ms. process 1 finished in 3547 ms
process 2 slept 3052ms. process 2 finished in 3547 ms
process 0 slept 38ms. process 0 finished in 3547 ms
process 4 slept 2066ms. process 4 finished in 3547 ms
Process finished with exit code 0
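The example above leans on the Windows-only GetTickCount and Sleep. As a minimal sketch of a portable variant, the timing can be done with MPI_Wtime, which is standard MPI and returns wall-clock seconds as a double; the per-rank stagger is left as a placeholder comment:

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int process_id;
    MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
    double t_start = MPI_Wtime();      // wall-clock time in seconds
    // ... per-rank work of varying length would go here ...
    MPI_Barrier(MPI_COMM_WORLD);       // wait for the slowest rank
    double t_end = MPI_Wtime();
    printf("process %d finished in %.3f ms\n", process_id, (t_end - t_start) * 1000.0);
    MPI_Finalize();
    return 0;
}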
MPI_Bcast
When a broadcast happens, one process sends the same data to every other process in a communicator. When the root process calls MPI_Bcast, the value in the data variable is sent to all the other processes; when those processes call MPI_Bcast, their data variable is filled with the value received from the root. Implementations of MPI_Bcast typically use a tree-shaped broadcast algorithm to get good network utilization.
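To see why the tree shape matters, here is a minimal sketch of a naive broadcast built from point-to-point calls (my_bcast is a hypothetical helper, not part of MPI): the root issues process_num - 1 sends one after another, while a tree-based broadcast finishes in roughly log2(process_num) rounds.

// Hypothetical helper: a linear broadcast built from MPI_Send / MPI_Recv.
void my_bcast(void *data, int count, MPI_Datatype datatype, int root, MPI_Comm comm) {
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    if (rank == root) {
        for (int i = 0; i < size; ++i) {   // root sends to every other rank in turn
            if (i != root)
                MPI_Send(data, count, datatype, i, 0, comm);
        }
    } else {
        MPI_Recv(data, count, datatype, root, 0, comm, MPI_STATUS_IGNORE);
    }
}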
MPI_METHOD
MPI_Bcast(
_Pre_opt_valid_ void* buffer, // Pointer to the data buffer. On the process named by root, the buffer holds the data to broadcast; on every other process in the communicator named by comm, it receives the data broadcast by the root.
_In_range_(>=, 0) int count, // Number of data elements in the buffer.
_In_ MPI_Datatype datatype, // MPI data type of each element in the buffer.
_mpi_coll_rank_(root) int root, // Rank of the process that sends the data.
_In_ MPI_Comm comm // The communicator handle.
);
Code example
#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
#include "time.h"
int main(int args, char **argv){
MPI_Init(&args, &argv);
int process_id;
MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
int process_num;
MPI_Comm_size(MPI_COMM_WORLD, &process_num);
int data = 0;
if (process_id == 0) {
srand(time(NULL));
data = rand() % 300;
}
printf("process %d data before board cast: %d --> ", process_id, data);
MPI_Bcast(&data,1,MPI_INT,0,MPI_COMM_WORLD);
printf("process %d data after board cast: %d", process_id, data);
MPI_Finalize();
}
"C:\Program Files\Microsoft MPI\Bin\mpiexec.exe" -np 4 ./MPI_BoardCast
process 1 data before broadcast: 0 --> process 1 data after broadcast: 213
process 3 data before broadcast: 0 --> process 3 data after broadcast: 213
process 2 data before broadcast: 0 --> process 2 data after broadcast: 213
process 0 data before broadcast: 213 --> process 0 data after broadcast: 213
Process finished with exit code 0
MPI_Scatter
MPI_Bcast sends the same data to every process; MPI_Scatter sends each process a different part of an array.
MPI_METHOD
MPI_Scatter(
_In_range_(!=, recvbuf) _In_opt_ const void* sendbuf, // Buffer holding the full array to scatter; significant only at the root.
_In_range_(>=, 0) int sendcount, // Number of elements sent to each process, not the total.
_In_ MPI_Datatype sendtype, // MPI data type of the elements in sendbuf.
_When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf, // Buffer that receives this process's portion.
_In_range_(>=, 0) int recvcount, // Number of elements received by each process.
_In_ MPI_Datatype recvtype, // MPI data type of the elements in recvbuf.
_mpi_coll_rank_(root) int root, // Rank of the process that sends the data.
_In_ MPI_Comm comm // The communicator handle.
);
Code example
#include "mpi.h"
#include "stdlib.h"
#include "stdio.h"
int main(int args, char **argv){
MPI_Init(&args, &argv);
int process_id;
MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
int process_num;
MPI_Comm_size(MPI_COMM_WORLD, &process_num);
int *scatter_info = malloc(sizeof (int) * process_num);
if (process_id == 0){
for (int i = 0; i < process_num; ++i) {
scatter_info[i] = i;
}
}
int recv_data;
MPI_Scatter(scatter_info, 1, MPI_INT, &recv_data, 1, MPI_INT, 0, MPI_COMM_WORLD);
printf("proccess %d recv data: %d \n", process_id, recv_data);
MPI_Finalize();
}
"C:\Program Files\Microsoft MPI\Bin\mpiexec.exe" -np 6 ./MPI_Scatter_Info
process 0 recv data: 0
process 4 recv data: 4
process 1 recv data: 1
process 2 recv data: 2
process 3 recv data: 3
process 5 recv data: 5
Process finished with exit code 0
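Note that the sendcount argument is the number of elements delivered to each rank, not the total. A minimal sketch, assuming the array length is an exact multiple of the process count, that scatters two ints to every process:

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

#define CHUNK 2   /* elements delivered to each rank */

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int process_id, process_num;
    MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
    MPI_Comm_size(MPI_COMM_WORLD, &process_num);
    int *send_data = NULL;
    if (process_id == 0) {                      // send buffer matters only at the root
        send_data = malloc(sizeof(int) * CHUNK * process_num);
        for (int i = 0; i < CHUNK * process_num; ++i)
            send_data[i] = i;
    }
    int recv_chunk[CHUNK];
    // rank r receives elements [r*CHUNK, r*CHUNK + CHUNK) of send_data
    MPI_Scatter(send_data, CHUNK, MPI_INT, recv_chunk, CHUNK, MPI_INT, 0, MPI_COMM_WORLD);
    printf("process %d received %d %d\n", process_id, recv_chunk[0], recv_chunk[1]);
    free(send_data);   // free(NULL) is a no-op on non-root ranks
    MPI_Finalize();
    return 0;
}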
MPI_Gather
MPI_Gather is the inverse of MPI_Scatter: it collects each process's data into the root process, with the elements ordered in the receive array by process rank.
MPI_METHOD
MPI_Gather(
_In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf, // Buffer holding this process's contribution.
_In_range_(>=, 0) int sendcount, // Number of elements sent by each process.
_In_ MPI_Datatype sendtype, // MPI data type of the elements in sendbuf.
_When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf, // Buffer that collects the data; significant only at the root.
_In_range_(>=, 0) int recvcount, // Number of elements received from each process, not the total.
_In_ MPI_Datatype recvtype, // MPI data type of the elements in recvbuf.
_mpi_coll_rank_(root) int root, // Rank of the process that receives the data.
_In_ MPI_Comm comm // The communicator handle.
);
Code example
#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
int main(int args, char **argv){
MPI_Init(&args, &argv);
int process_id;
MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
int process_num;
MPI_Comm_size(MPI_COMM_WORLD, &process_num);
srand(process_id * 2000);
int data = rand() % 200;
printf("process %d got data : %d\n", process_id, data);
int * recv_data = malloc(sizeof (int) * process_num);
MPI_Gather(&data, 1, MPI_INT, recv_data, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (process_id == 0){
printf("process 0 recv data : ");
for (int i = 0; i < process_num; ++i) {
printf("%d ", recv_data[i]);
}
}
MPI_Finalize();
}
"C:\Program Files\Microsoft MPI\Bin\mpiexec.exe" -np 6 ./MPI_Gather_Info
process 4 got data : 163
process 3 got data : 32
process 5 got data : 94
process 1 got data : 169
process 2 got data : 100
process 0 got data : 38
process 0 recv data : 38 169 100 32 163 94
Process finished with exit code 0
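A typical follow-up is for the root to post-process the collected values. Here is a minimal sketch along the lines of the example above, in which the root averages one value per rank; keep in mind that recvcount is the count received from each process, not the total:

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int process_id, process_num;
    MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
    MPI_Comm_size(MPI_COMM_WORLD, &process_num);
    srand(process_id + 1);                 // distinct seed per rank
    int data = rand() % 200;
    int *recv_data = NULL;
    if (process_id == 0)                   // receive buffer matters only at the root
        recv_data = malloc(sizeof(int) * process_num);
    // recvcount = 1: one element is received FROM EACH process, not one in total
    MPI_Gather(&data, 1, MPI_INT, recv_data, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (process_id == 0) {
        int sum = 0;
        for (int i = 0; i < process_num; ++i)
            sum += recv_data[i];
        printf("average over %d ranks: %.2f\n", process_num, (double)sum / process_num);
    }
    free(recv_data);
    MPI_Finalize();
    return 0;
}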
MPI_Allgather
When data is distributed across all the processes, MPI_Allgather gathers all of it into every process.
MPI_METHOD
MPI_Allgather(
_In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf, // Buffer holding this process's contribution.
_In_range_(>=, 0) int sendcount, // Number of elements sent by each process.
_In_ MPI_Datatype sendtype, // MPI data type of the elements in sendbuf.
_Out_opt_ void* recvbuf, // Buffer that collects the data on every process.
_In_range_(>=, 0) int recvcount, // Number of elements received from each process.
_In_ MPI_Datatype recvtype, // MPI data type of the elements in recvbuf.
_In_ MPI_Comm comm // The communicator handle.
);
Code example
#include "stdlib.h"
#include "stdio.h"
#include "mpi.h"
int main(int args, char **argv) {
MPI_Init(&args, &argv);
int process_id;
MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
int process_num;
MPI_Comm_size(MPI_COMM_WORLD, &process_num);
srand(process_id * 1000);
int data = rand() % 200;
printf("process %d has data : %d\n", process_id, data);
int * recv_data = malloc(sizeof (int) * process_num);
MPI_Allgather(&data, 1, MPI_INT, recv_data, 1, MPI_INT, MPI_COMM_WORLD);
printf("process %d got recv_data : ", process_id);
for (int i = 0; i < process_num; ++i) {
printf("%d ", recv_data[i]);
}
MPI_Finalize();
}
"C:\Program Files\Microsoft MPI\Bin\mpiexec.exe" -np 5 ./MPI_Allgather_Info
process 0 has data : 38
process 0 got recv_data : 38 104 169 35 100
process 3 has data : 35
process 3 got recv_data : 38 104 169 35 100
process 4 has data : 100
process 4 got recv_data : 38 104 169 35 100
process 2 has data : 169
process 2 got recv_data : 38 104 169 35 100
process 1 has data : 104
process 1 got recv_data : 38 104 169 35 100
Process finished with exit code 0
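Semantically, MPI_Allgather behaves like an MPI_Gather to some root followed by an MPI_Bcast of the gathered array (real implementations usually do something smarter, such as a ring or recursive doubling). As a sketch, using the variables from the example above, the MPI_Allgather call could be replaced by:

// Equivalent to MPI_Allgather(&data, 1, MPI_INT, recv_data, 1, MPI_INT, MPI_COMM_WORLD):
MPI_Gather(&data, 1, MPI_INT, recv_data, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(recv_data, process_num, MPI_INT, 0, MPI_COMM_WORLD);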