Send-Recv

De Wiki de Calcul Québec
Aller à : Navigation, rechercher
Cette page est une traduction de la page Send-Recv et la traduction est complétée à 100 % et à jour.

Autres langues : anglais 100 % • français 100 %

This example illustrates point-to-point communication using MPI_Send and MPI_Recv.

Process 0 first prepares (allocates memory and initializes with random numbers) one vector of real values for each available process (including process 0) in the run. Then it sends each vector to its corresponding process, one after the other. Before the communication, process 0 prints out the sum of each vector to be sent. After communication is completed, each process calculates the sum of the vector it received, and the results are gathered on process 0 using an MPI_Gather call. Process 0 then prints out these sums for comparison. This example is only an illustration of MPI_Send and MPI_Recv usage: the same work can be done more efficiently with the collective communication MPI_Scatter, as shown in example 7, which performs exactly the same task. It is in fact strongly recommended to use collective communications whenever possible.

In Fortran

File : send_recv.f
! --------------------------------------------------------
!   
!            <----------- Data ------------>
!   
!          ####################################
!          #      #      #      #      #      #
!        0 #  AA  #  BB  #  CC  #  DD  #  EE  #
!          #      #      #      #      #      #
!          ####################################
!     T    #      #      #      #      #      #
!        1 #      #      #      #      #      #
!     a    #      #      #      #      #      #
!          ####################################
!     s    #      #      #      #      #      #
!        2 #      #      #      #      #      #       BEFORE
!     k    #      #      #      #      #      #
!          ####################################
!     s    #      #      #      #      #      #
!        3 #      #      #      #      #      #
!          #      #      #      #      #      #
!          ####################################
!          #      #      #      #      #      #
!        4 #      #      #      #      #      #
!          #      #      #      #      #      #
!          ####################################
!   
!            <----------- Data ------------->
!   
!          ####################################
!          #      #      #      #      #      #
!        0 #  AA  #      #      #      #      #
!          #      #      #      #      #      #
!          ####################################
!     T    #      #      #      #      #      #
!        1 #  BB  #      #      #      #      #
!     a    #      #      #      #      #      #
!          ####################################
!     s    #      #      #      #      #      #
!        2 #  CC  #      #      #      #      #       AFTER
!     k    #      #      #      #      #      #
!          ####################################
!     s    #      #      #      #      #      #
!        3 #  DD  #      #      #      #      #
!          #      #      #      #      #      #
!          ####################################
!          #      #      #      #      #      #
!        4 #  EE  #      #      #      #      #
!          #      #      #      #      #      #
!          ####################################
!   
! Author: Carol Gauthier
!         Centre de calcul scientifique
!         Université de Sherbrooke
!
! Last revision: August 2004
! --------------------------------------------------------
Program Example_MPI

  ! --------------------------------------------------------
  ! Point-to-point communication using MPI_Send / MPI_Recv.
  !
  ! Process 0 prepares (allocates and fills with random numbers) one
  ! vector per process, prints the sum of each vector it is about to
  ! send, then sends one vector to every other rank.  Each receiving
  ! rank sums its vector; the sums and the receive times are gathered
  ! back on process 0 with MPI_Gather and printed for comparison with
  ! the "before" sums.
  !
  ! Usage:  mpiexec -n <ntasks> ./send_recv <buffsize>
  ! --------------------------------------------------------

  implicit none

  include 'mpif.h'

  integer :: ierr,ntasks,taskid,itask,buffsize
  integer :: status(MPI_STATUS_SIZE)
  character(len=12) :: argtmp
  real(8) :: inittime,recvtime,totaltime,buffsum

  real(8),allocatable,dimension(:)   :: recvtimes  ! per-rank receive times (meaningful on root only)
  real(8),allocatable,dimension(:)   :: buffsums   ! per-rank vector sums (meaningful on root only)
  real(8),allocatable,dimension(:,:) :: sendbuff   ! one column per destination rank (root only)
  real(8),allocatable,dimension(:)   :: recvbuff   ! receive buffer

  !---------------------------------------------------------------
  ! MPI initialisation: must precede any other MPI call.
  call MPI_INIT( ierr )

  !---------------------------------------------------------------
  ! Get the number of MPI processes and the rank of this process.
  call MPI_COMM_SIZE(MPI_COMM_WORLD,ntasks,ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD,taskid,ierr)

  !---------------------------------------------------------------
  ! Vector length comes from the first command-line argument
  ! (standard intrinsic instead of the non-standard getarg).
  call get_command_argument(1,argtmp)
  read(argtmp,'(I12)') buffsize

  !---------------------------------------------------------------
  ! Description of the example, printed by the root only.
  if ( taskid.eq.0 )then
    write(6,'(A)')
    write(6,'(A)')"##########################################################"
    write(6,'(A)')
    write(6,'(A)')" Example 4"
    write(6,'(A)')
    write(6,'(A)')" Point-to-point Communication : MPI_Send MPI_Recv"
    write(6,'(A)')
    write(6,'(A,I12)')" Vector size:",buffsize
    write(6,'(A,I5)')" Number of processes:",ntasks
    write(6,'(A)')
    write(6,'(A)')"##########################################################"
    write(6,'(A)')
    write(6,'(A)')"                --> BEFORE COMMUNICATION <--"
    write(6,'(A)')
  endif

  !---------------------------------------------------------------
  ! Memory allocation: every rank needs a receive buffer and the
  ! gather targets; only the root needs the full send matrix.
  allocate( recvbuff(0:buffsize-1) )
  allocate( recvtimes(0:ntasks-1) )
  allocate( buffsums(0:ntasks-1) )

  if ( taskid.eq.0 )then

    allocate( sendbuff(0:buffsize-1,0:ntasks-1) )

    !-----------------------------------------------------------------
    ! Fill the whole send matrix with uniform random numbers in [0,1)
    ! (standard random_number instead of the non-standard rand()).
    call random_number(sendbuff)

    !-----------------------------------------------------------------
    ! Print the sum of each vector before it is sent.
    do itask=1,ntasks-1
      buffsum=sum(sendbuff(:,itask))
      write(6,'(A,I3,A,I3,A,E14.8)')"Process",taskid,": Sum of sent vector to ", &
      &                              itask," -> ",buffsum
    end do

  end if

  !-----------------------------------------------------------------
  ! Communication.  recvtime and buffsum are defined on every rank so
  ! the MPI_Gather calls below never send uninitialised data (the
  ! original left both undefined on the root).
  recvtime=0.0d0
  buffsum=0.0d0

  inittime = MPI_Wtime()

  if ( taskid.eq.0 ) then

    do itask=1,ntasks-1
       call MPI_Send(sendbuff(0,itask),buffsize,MPI_REAL8, &
       &             itask,0,MPI_COMM_WORLD,ierr)
    end do

  else

    call MPI_Recv(recvbuff,buffsize,MPI_REAL8, &
    &             0,MPI_ANY_TAG,MPI_COMM_WORLD,status,ierr)

    ! Time elapsed since the start of the communication phase
    ! (relative, so it fits the F5.2 output format below).
    recvtime = MPI_Wtime() - inittime

    buffsum=sum(recvbuff)

  end if

  call MPI_Barrier(MPI_COMM_WORLD,ierr)

  ! Elapsed communication time; the original forgot "- inittime" and
  ! would have printed an absolute wall-clock time through F5.2.
  totaltime = MPI_Wtime() - inittime

  !-----------------------------------------------------------------
  ! Gather every rank's receive time and vector sum on the root.
  call MPI_Gather(recvtime,1,MPI_REAL8,recvtimes,1,MPI_REAL8, &
  &               0,MPI_COMM_WORLD,ierr)

  call MPI_Gather(buffsum,1,MPI_REAL8,buffsums,1,MPI_REAL8, &
  &               0,MPI_COMM_WORLD,ierr)

  !-----------------------------------------------------------------
  ! Print out after communication (root only).
  if(taskid.eq.0)then
    write(6,*)
    write(6,'(A)')"##########################################################"
    write(6,*)
    write(6,'(A)')"                --> AFTER COMMUNICATION <--"
    write(6,*)
    do itask=1,ntasks-1
      write(6,'(A,I3,A,F5.2,A,e14.8)')"Process ",itask,": Vector received at ",recvtimes(itask), &
      &                               " seconds : Sum = ",buffsums(itask)
    end do
    write(6,*)
    write(6,'(A)')"##########################################################"
    write(6,'(A,F5.2,A)')" Total communication time : ",totaltime," seconds"
    write(6,'(A)')"##########################################################"
    write(6,*)
  end if

  call MPI_Barrier(MPI_COMM_WORLD,ierr)

  !-----------------------------------------------------------------
  ! Free the allocated memory.
  if(taskid.eq.0) deallocate(sendbuff)
  deallocate(recvbuff)
  deallocate(recvtimes)
  deallocate(buffsums)

  !-----------------------------------------------------------------
  ! MPI finalisation: no MPI call is allowed after this point.
  call MPI_FINALIZE( ierr )

end Program Example_MPI


In C

File : send_recv.c
/*--------------------------------------------------------
 
             <---------- Data ----------->
 
          ####################################
          #      #      #      #      #      #
        0 #  AA  #  BB  #  CC  #  DD  #  EE  #
          #      #      #      #      #      #
          ####################################
     T    #      #      #      #      #      #
        1 #      #      #      #      #      #
     a    #      #      #      #      #      #
          ####################################
     s    #      #      #      #      #      #
        2 #      #      #      #      #      #       BEFORE
     k    #      #      #      #      #      #
          ####################################
     s    #      #      #      #      #      #
        3 #      #      #      #      #      #
          #      #      #      #      #      #
          ####################################
          #      #      #      #      #      #
        4 #      #      #      #      #      #
          #      #      #      #      #      #
          ####################################
 
             <---------- Data ----------->
 
          ####################################
          #      #      #      #      #      #
        0 #  AA  #      #      #      #      #
          #      #      #      #      #      #
          ####################################
     T    #      #      #      #      #      #
        1 #  BB  #      #      #      #      #
     a    #      #      #      #      #      #
          ####################################
     s    #      #      #      #      #      #
        2 #  CC  #      #      #      #      #       AFTER
     k    #      #      #      #      #      #
          ####################################
     s    #      #      #      #      #      #
        3 #  DD  #      #      #      #      #
          #      #      #      #      #      #
          ####################################
          #      #      #      #      #      #
        4 #  EE  #      #      #      #      #
          #      #      #      #      #      #
          ####################################
 
 Author: Carol Gauthier
         Centre de calcul scientifique
         Université de Sherbrooke
 
 Last revision: September 2005
-------------------------------------------------------- */
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "math.h"
#include "mpi.h"
 
/* Point-to-point communication using MPI_Send / MPI_Recv.
 *
 * Process 0 prepares one random vector per process, prints the sum of
 * each vector it is about to send, then sends one vector to every
 * other rank.  Each receiving rank sums its vector; the sums and the
 * receive times are gathered back on process 0 (MPI_Gather) and
 * printed for comparison with the "before" sums.
 *
 * Usage:  mpiexec -n <ntasks> ./send_recv <buffsize>
 */
int main(int argc,char** argv)
{
   /*===============================================================*/
   /* Declaration of variables                                      */
   int          taskid, ntasks;
   MPI_Status   status;
   int          ierr,i,itask;
   int          buffsize;
   double       **sendbuff=NULL,*recvbuff=NULL;
   double       buffsum,*buffsums=NULL;        /* gather targets are      */
   double       inittime,totaltime,recvtime;   /* allocated on the root   */
   double       *recvtimes=NULL;               /* sized to ntasks (the    */
                                               /* original fixed [1024]   */
                                               /* arrays could overflow). */

   /*===============================================================*/
   /* MPI Initialisation. It's important to put this call at the    */
   /* beginning of the program, after variable declarations.        */
   MPI_Init(&argc, &argv);

   /*===============================================================*/
   /* Get the number of MPI processes and the taskid of this process.*/
   MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
   MPI_Comm_size(MPI_COMM_WORLD,&ntasks);

   /*===============================================================*/
   /* Get buffsize value from program arguments (guard against a    */
   /* missing argument before touching argv[1]).                    */
   if ( argc < 2 ){
     if ( taskid == 0 )
       fprintf(stderr,"Usage: %s <buffsize>\n",argv[0]);
     MPI_Finalize();
     return 1;
   }
   buffsize=atoi(argv[1]);

   /*===============================================================*/
   /* Printing out the description of the example.                  */
   if ( taskid == 0 ){
     printf("\n\n\n");
     printf("##########################################################\n\n");
     printf(" Example 4 \n\n");
     printf(" Point-to-point Communication: MPI_Send MPI_Recv \n\n");
     printf(" Vector size: %d\n",buffsize);
     printf(" Number of processes: %d\n\n",ntasks);
     printf("##########################################################\n\n");
     printf("                --> BEFORE COMMUNICATION <--\n\n");
   }

   if ( taskid == 0 ){
     /*=============================================================*/
     /* Memory allocation: one contiguous ntasks x buffsize matrix  */
     /* addressed through a row-pointer table, plus gather targets. */
     sendbuff=(double **)malloc(sizeof(double *)*ntasks);
     sendbuff[0]=(double *)malloc(sizeof(double)*ntasks*buffsize);
     for(i=1;i<ntasks;i++)sendbuff[i]=sendbuff[i-1]+buffsize;
     recvtimes=(double *)malloc(sizeof(double)*ntasks);
     buffsums =(double *)malloc(sizeof(double)*ntasks);

     /*=============================================================*/
     /* Vectors and matrices initialisation.                        */
     srand((unsigned)time( NULL ) + taskid);
     for(itask=0;itask<ntasks;itask++){
       for(i=0;i<buffsize;i++){
         sendbuff[itask][i]=(double)rand()/RAND_MAX;
       }
     }

     /*=============================================================*/
     /* Print out before communication.                             */
     for(itask=1;itask<ntasks;itask++){
       buffsum=0.0;
       for(i=0;i<buffsize;i++){
         buffsum=buffsum+sendbuff[itask][i];
       }
       printf("Process %d : Sum of vector sent to %d -> %e \n",
               taskid,itask,buffsum);
     }

   }
   else{

     /*=============================================================*/
     /* Memory allocation.                                          */
     recvbuff=(double *)malloc(sizeof(double)*buffsize);

   }

   /*===============================================================*/
   /* Communication.  recvtime and buffsum are defined on every     */
   /* rank so the MPI_Gather calls below never send uninitialised   */
   /* memory (the original left both undefined on the root).        */
   recvtime=0.0;
   buffsum=0.0;

   inittime = MPI_Wtime();

   if ( taskid == 0 ){

     for(itask=1 ; itask<ntasks ; itask++){
       ierr = MPI_Send(sendbuff[itask],buffsize,MPI_DOUBLE,
                       itask,0,MPI_COMM_WORLD);
     }

   }
   else{

     ierr = MPI_Recv(recvbuff,buffsize,MPI_DOUBLE,
                     0,MPI_ANY_TAG,MPI_COMM_WORLD,&status);

     /* Time elapsed since the start of the communication phase     */
     /* (relative; the original stored an absolute wall-clock time).*/
     recvtime = MPI_Wtime() - inittime;

     buffsum=0.0;
     for(i=0 ; i<buffsize ; i++){
       buffsum=buffsum+recvbuff[i];
     }

   }

   MPI_Barrier(MPI_COMM_WORLD);

   totaltime = MPI_Wtime() - inittime;

   /*===============================================================*/
   /* Gather every rank's receive time and vector sum on the root.  */
   /* (recvtimes/buffsums may be NULL off-root; MPI ignores the     */
   /* receive buffer on non-root ranks.)                            */
   ierr=MPI_Gather(&recvtime,1,MPI_DOUBLE,
                   recvtimes,1, MPI_DOUBLE,
                   0,MPI_COMM_WORLD);

   ierr=MPI_Gather(&buffsum,1,MPI_DOUBLE,
                   buffsums,1, MPI_DOUBLE,
                   0,MPI_COMM_WORLD);

   /*===============================================================*/
   /* Print out after communication.                                */
   if(taskid==0){
     printf("\n");
     printf("##########################################################\n\n");
     printf("                --> AFTER COMMUNICATION <-- \n\n");
     for(itask=1;itask<ntasks;itask++){
       printf("Process %d : Vector received at %f seconds : Sum= %e\n",
               itask,recvtimes[itask],buffsums[itask]);
     }
     printf("\n");
     printf("##########################################################\n\n");
     printf(" Communication time : %f seconds\n\n",totaltime);
     printf("##########################################################\n\n");
   }

   /*===============================================================*/
   /* Free the allocated memory.                                    */
   if ( taskid == 0 ){
     free(sendbuff[0]);
     free(sendbuff);
     free(recvtimes);
     free(buffsums);
   }
   else{
     free(recvbuff);
   }

   /*===============================================================*/
   /* MPI finalisation: no MPI call is allowed after this point.    */
   MPI_Finalize();

   return 0;
}


Outils personnels
Espaces de noms

Variantes
Actions
Navigation
Ressources de Calcul Québec
Outils
Partager