Collective Communication

Collective communication functions

In the previous sections we used MPI point-to-point communication through the functions (Send, Receive). MPI also provides another group of functions called Collective Communication.

Internally, these functions are built on top of point-to-point communication, but they offer specific features:
- The data is transferred between all the processes that belong to the communicator.
- Every process must call the same function when the collective data transfer takes place.
- A single collective call replaces many separate send/receive calls, which makes the program shorter and less error-prone.

The main classes of collective communication functions are:
- MPI_Barrier(): global synchronization of all the processes.
- MPI_Bcast(): broadcast data from one process to all the others.
- MPI_Scatter(): split the data and distribute the pieces among the processes.
- MPI_Gather(): collect the distributed data into a single process.
- MPI_Allgather(): collect the distributed data so that every process receives the complete result.
- MPI_Alltoall(): every process exchanges data with every other process.
- MPI_Reduce(): reduction operations (sum, product, minimum, maximum, ...) that combine the distributed values into a single result on the root process.
- MPI_Allreduce(): equivalent to MPI_Reduce() followed by MPI_Bcast(); the reduced result is delivered to every process. (A short sketch of the reduction functions is given right after this list.)
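The reduction functions above are not illustrated later in these notes, so here is a minimal sketch (my own example, not taken from the original notes) in the same C++/MPI style used below. Each process contributes one value; MPI_Reduce() delivers the sum to the root only, while MPI_Allreduce() delivers it to every process. An MPI_Barrier() call is also included as an example of global synchronization.

#include <iostream>
#include <mpi.h>
using namespace std;

int main(int argc, char** argv)
{
    int mynode, totalnodes;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &totalnodes);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynode);

    int value = mynode;                 // each process contributes its own rank
    int sumAtRoot = 0, sumEverywhere = 0;

    MPI_Barrier(MPI_COMM_WORLD);        // explicit global synchronization (for illustration only)

    // only the root (rank 0) receives the reduced result
    MPI_Reduce(&value, &sumAtRoot, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    // every process receives the reduced result
    MPI_Allreduce(&value, &sumEverywhere, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    if (mynode == 0)
        cout << "sum at root = " << sumAtRoot << endl;
    cout << "Process " << mynode << " sees sum = " << sumEverywhere << endl;

    MPI_Finalize();
    return 0;
}

Other reduction operations such as MPI_MAX, MPI_MIN and MPI_PROD can be used in place of MPI_SUM.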
Broadcast

A broadcast is the operation in which one process sends the same data to all the processes in the communicator. In MPI, the function that performs this operation is MPI_Bcast(). As with every collective operation, it must be called by all the processes of the communicator.
[Figure: MPI_Bcast() copies the buffer of the root process to every process in the communicator.]
The general form of MPI_Bcast():
int MPI_Bcast(void *buf,             /**** in/out ****/
              int count,             /**** in ****/
              MPI_Datatype datatype, /**** in ****/
              int root,              /**** in ****/
              MPI_Comm comm          /**** in ****/)
- buf: the address of the buffer that holds the data elements (sent by the root, received by the other processes).
- count: the number of data elements in the buffer.
- datatype: the MPI datatype of the elements.
- root: the rank of the root (sending) process.
- comm: the communicator.
MPI_Bcast() sends the data stored in the buffer of the process whose rank equals root to every other process in the same communicator.
Notes on calling MPI_Bcast():
- It must be called by every process, including the root.
- There is no need to call MPI_Recv(); the broadcast itself delivers the data to every process.
- The root process is identified by its rank, and all the processes must pass the same root and the same communicator.
- When the call returns, the buffer of every process contains a copy of the data that was in the root's buffer.
Example: process number 0 fills an array with random numbers and broadcasts it to all the other processes; each process prints the array before and after the MPI_Bcast() call.

#include <iostream>
#include <cstdlib>
#include <mpi.h>
using namespace std;

int main(int argc, char** argv)
{
    int mynode, totalnodes;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &totalnodes);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynode);

    int a[5];
    cout << "my rank is " << mynode << endl;
    if (mynode == 0) {                     // only the root fills the array
        for (int i = 0; i < 5; i++)
            a[i] = rand() % 1000;
    }

    cout << "before Bcast a is: ";         // on non-root processes a[] is still uninitialized here
    for (int i = 0; i < 5; i++) cout << a[i] << " ";
    cout << endl;

    // every process, including the root, calls MPI_Bcast()
    MPI_Bcast(a, 5, MPI_INT, 0, MPI_COMM_WORLD);

    cout << "after Bcast a is: ";
    for (int i = 0; i < 5; i++) cout << a[i] << " ";
    cout << endl;

    MPI_Finalize();
    return 0;
}
Example using MPI_Scatter(): process 0 distributes a 12-element array evenly among the processes (the program exits if 12 is not divisible by the number of processes).

#include <iostream>
#include <mpi.h>
using namespace std;

int main(int argc, char** argv)
{
    int sendArray[12];
    int receiveArray[12];
    int processesNumber;
    int currentProcess;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &processesNumber);
    MPI_Comm_rank(MPI_COMM_WORLD, &currentProcess);

    if ((12 % processesNumber) != 0)       // 12 must divide evenly among the processes
    {
        MPI_Finalize();
        return 0;
    }

    if (currentProcess == 0)               // only the root fills the array to be scattered
    {
        for (int i = 0; i < 12; i++)
            sendArray[i] = i + 1;
    }

    MPI_Scatter(sendArray, 12/processesNumber, MPI_INT,
                receiveArray, 12/processesNumber, MPI_INT,
                0, MPI_COMM_WORLD);

    cout << "Process " << currentProcess << " has: ";
    for (int i = 0; i < 12/processesNumber; i++)
        cout << receiveArray[i] << " ";
    cout << endl;

    MPI_Finalize();
    return 0;
}

Gather

MPI_Gather() is the inverse of MPI_Scatter(): it collects the contents of several buffers, one per process, into a single buffer on the root process. Example:
#include <iostream>
#include <mpi.h>
using namespace std;

int main(int argc, char** argv)
{
    int sendArray[12];
    int receiveArray[12];
    int processesNumber;
    int currentProcess;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &processesNumber);
    MPI_Comm_rank(MPI_COMM_WORLD, &currentProcess);

    if ((12 % processesNumber) != 0)
    {
        MPI_Finalize();
        return 0;
    }

    // each process prepares its own part of the data
    for (int i = 0; i < 12/processesNumber; i++)
        sendArray[i] = (12/processesNumber)*currentProcess + i;

    MPI_Gather(sendArray, 12/processesNumber, MPI_INT,
               receiveArray, 12/processesNumber, MPI_INT,
               0, MPI_COMM_WORLD);

    if (currentProcess == 0)               // only the root holds the gathered data
    {
        for (int i = 0; i < 12; i++)
            cout << receiveArray[i] << " ";
        cout << endl;
    }

    MPI_Finalize();
    return 0;
}
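The original notes show the general form only for MPI_Bcast() and MPI_Alltoall(). For reference, the standard prototypes of the two functions used in the examples above are listed below (classic MPI C style; newer MPI versions declare sendbuf as const void*). Note that sendcount and recvcount count the elements per process, not the total.

int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,  /* data split at the root      */
                void *recvbuf, int recvcount, MPI_Datatype recvtype,  /* one block for each process  */
                int root, MPI_Comm comm);

int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,   /* one block from each process */
               void *recvbuf, int recvcount, MPI_Datatype recvtype,   /* collected at the root       */
               int root, MPI_Comm comm);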
Another example: the interval [5000, 6000] is split among the processes; each process searches its own sub-interval for prime numbers and prints the first five it found (padding with -1 if it found fewer than five).

#include <iostream>
#include <mpi.h>
using namespace std;

int main(int argc, char** argv)
{
    int mynode, totalnodes;
    int x = 5000;
    int y = 6000;
    int primes[1000];
    int primteN[5];
    int countP = 0;
    int startval, endval;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &totalnodes);   // get totalnodes
    MPI_Comm_rank(MPI_COMM_WORLD, &mynode);       // get mynode

    // split the interval [x, y] among the processes
    int in = y - x;
    startval = (in*mynode)/totalnodes + x;
    endval   = (in*(mynode+1))/totalnodes + x;

    // find the primes in this process's sub-interval
    for (int i = startval; i <= endval; i++) {
        bool t = true;
        for (int m = 2; m < i; m++) {
            if (i % m == 0) {
                t = false;
                break;
            }
        }
        if (t) {
            primes[countP] = i;
            if (countP < 5)
                primteN[countP] = i;      // keep the first five primes
            countP++;
        }
    }
    if (countP < 5) {                     // pad with -1 if fewer than five primes were found
        for (int j = countP; j < 5; j++)
            primteN[j] = -1;
    }

    cout << "the 5 prime in process " << mynode << " : ";
    for (int i = 0; i < 5; i++)
        cout << primteN[i] << " ";
    cout << endl;

    MPI_Finalize();
    return 0;
}
Example using MPI_Allgather(): each process contributes one random value, and every process receives the complete array of values.

#include <iostream>
#include <cstdlib>
#include <mpi.h>
using namespace std;

int main(int argc, char** argv)
{
    int mynode, totalnodes;
    int myrand;
    int a[100];
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &totalnodes);   // get totalnodes
    MPI_Comm_rank(MPI_COMM_WORLD, &mynode);       // get mynode
    myrand = rand() % 100 * (mynode + 1);         // each process produces one value
    // every process receives one value from every process
    MPI_Allgather(&myrand, 1, MPI_INT, a, 1, MPI_INT, MPI_COMM_WORLD);
    cout << "Processor " << mynode << " has: ";
    for (int i = 0; i < totalnodes; i++)
        cout << a[i] << " ";
    cout << endl;
    MPI_Finalize();
    return 0;
}
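The general form of MPI_Allgather() is not shown in these notes; for reference, its standard prototype (classic MPI C style) is the same as MPI_Gather() but without the root argument, since the gathered array ends up on every process:

int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                  void *recvbuf, int recvcount, MPI_Datatype recvtype,
                  MPI_Comm comm);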
MPI_Alltoall

In MPI_Alltoall(), every process sends sendcount elements from its sendbuf to each process in the communicator, and receives recvcount elements from each process into its recvbuf. If the number of processes is larger than the number of distributed element blocks, a problem occurs, because not every process would get its share of the sent elements.
int MPI_Alltoall(void *sendbuf,
                 int sendcount,
                 MPI_Datatype sendtype,
                 void *recvbuf,
                 int recvcount,
                 MPI_Datatype recvtype,
                 MPI_Comm comm)
- sendcount: the number of elements that each process sends to every other process; these elements are taken from sendbuf. It is equal to the receive count (sendcount = recvcount).
- recvcount: the number of elements received from each process. In most cases the types also match (sendtype = recvtype).
Example: this program creates, in each process, a buffer of 5 elements whose values depend on the rank of the sending process; every process then exchanges one element with each of the other processes.
#include <iostream>
#include <mpi.h>
using namespace std;

int main(int argc, char** argv)
{
    int mynode, totalnodes;
    int a[5];
    int b[5000];
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &totalnodes);   // get totalnodes
    MPI_Comm_rank(MPI_COMM_WORLD, &mynode);       // get mynode

    for (int i = 0; i < 5; i++)
        a[i] = mynode * (i + 1);                  // values depend on the sender's rank

    // each process sends one element to every process and receives one from every process
    // (the program assumes it is run with at most 5 processes)
    MPI_Alltoall(a, 1, MPI_INT, b, 1, MPI_INT, MPI_COMM_WORLD);

    cout << "Processor " << mynode << " received: ";
    for (int i = 0; i < totalnodes; i++)
        cout << b[i] << " ";
    cout << endl;

    MPI_Finalize();
    return 0;
}