MPI Reduce

De Wiki de Calcul Québec
(Redirigé depuis Reduce/en)
Aller à : Navigation, rechercher
Cette page est une traduction de la page MPI Reduce et la traduction est complétée à 100 % et à jour.

Autres langues :anglais 100% • ‎français 100%

This example shows how to use the MPI_Reduce function.

Each process prepares one vector sendbuff of length buffsize of real random numbers. MPI_Reduce applies a mathematical operation (addition in this case) on the jth element of sendbuff of all the available processes, and places the result in the jth element of recvbuff in the root process (process 0 in this case), for all elements of sendbuff. The mathematical operation that is applied can be a sum, a product, a search for the maximum or minimum, etc. See the documentation for the other mathematical operations available.

Similarly, the function MPI_Allreduce does the same work and broadcasts the resulting vector (recvbuff) to all processes.

Before and after communication, the sum of each vector is printed out for verification.

In Fortran

File : reduce.f
!--------------------------------------------------------
!  
!            <--- sendbuff ---->
!           
!     T     *********************
!         0 * A0 * B0 * .. * Z0 *
!     a     *********************
!         1 * A1 * B1 * .. * Z1 *
!     s     *********************
!         2 * A2 * B2 * .. * Z2 *
!     k     *********************
!         3 * A3 * B3 * .. * Z3 *
!     s     *********************
!         4 * A4 * B4 * .. * Z4 *
!           *********************
!          
!            <--- recvbuff ---->
!           
!     T     **************************************************
!         0 * A0+A1+..+A4 * B0+B1+..+B4 *  ..  * Z0+Z1+..+Z4 *
!     a     **************************************************
!         1 *             *             *  ..  *             *
!     s     **************************************************
!         2 *             *             *  ..  *             *
!     k     **************************************************
!         3 *             *             *  ..  *             *
!     s     **************************************************
!         4 *             *             *  ..  *             *
!           **************************************************
!
! Author: Carol Gauthier
!         Centre de calcul scientifique
!         Université de Sherbrooke
!
! Last revision: 2004/08/25
!--------------------------------------------------------
program Example_MPI

  !------------------------------------------------------------------
  ! Demonstrate MPI_Reduce: each rank fills sendbuff with random
  ! numbers; an element-wise MPI_SUM reduction collects the result
  ! into recvbuff on rank 0.  The scalar sums are printed before and
  ! after communication for verification.
  !------------------------------------------------------------------
  implicit none
  include 'mpif.h'

  integer :: ierr, ntasks, taskid
  integer :: i, buffsize, nseed
  character(len=12) :: argtmp
  real(8) :: inittime, totaltime, buffsum, totalsum

  real(8), allocatable, dimension(:) :: sendbuff
  real(8), allocatable, dimension(:) :: recvbuff
  integer, allocatable, dimension(:) :: seed

  !---------------------------------------------------------------
  ! MPI initialisation.  It is important to put this call at the
  ! beginning of the program, after variable declarations.
  call MPI_INIT( ierr )

  !---------------------------------------------------------------
  ! Get the number of MPI processes and the taskid of this process.
  call MPI_COMM_SIZE(MPI_COMM_WORLD,ntasks,ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD,taskid,ierr)

  !---------------------------------------------------------------
  ! Get buffsize value from the first program argument.
  ! get_command_argument is standard Fortran 2003 (getarg is a
  ! vendor extension).
  call get_command_argument(1,argtmp)
  read(argtmp,'(I12)')buffsize

  !---------------------------------------------------------------
  ! Printing out the description of the example (rank 0 only).
  if ( taskid.eq.0 )then
    write(6,'(A)')
    write(6,'(A)')"##########################################################"
    write(6,'(A)')
    write(6,'(A)')" Example 12"
    write(6,'(A)')
    write(6,'(A)')" Collective Communication : MPI_Reduce"
    write(6,'(A)')
    write(6,'(A,I12)')" Vector size:",buffsize
    write(6,'(A,I5)')" Number of processes:",ntasks
    write(6,'(A)')
    write(6,'(A)')"##########################################################"
    write(6,'(A)')
    write(6,'(A)')"                --> BEFORE COMMUNICATION <--"
    write(6,'(A)')
  endif

  !---------------------------------------------------------------
  ! Memory allocation.
  allocate( sendbuff(0:buffsize-1) )
  allocate( recvbuff(0:buffsize-1) )

  !-----------------------------------------------------------------
  ! Vector initialisation with standard-conforming RNG calls
  ! (random_seed/random_number replace nonstandard srand/rand).
  ! Seed depends on taskid so each rank gets a different sequence.
  call random_seed(size=nseed)
  allocate( seed(nseed) )
  seed = taskid*10 + [(i, i=1,nseed)]
  call random_seed(put=seed)
  deallocate( seed )
  call random_number(sendbuff)

  !-----------------------------------------------------------------
  ! Print out before communication.

  call MPI_Barrier(MPI_COMM_WORLD,ierr)

  buffsum = sum(sendbuff)

  write(6,'(A,I3,A,E14.8)')"Process",taskid,": Sum of sendbuff elements= ",buffsum

  call MPI_Barrier(MPI_COMM_WORLD,ierr)

  ! Scalar reduction: total of all per-rank sums, delivered to rank 0.
  call MPI_Reduce(buffsum,totalsum,1,MPI_REAL8,MPI_SUM,0,MPI_COMM_WORLD,ierr)

  if(taskid.eq.0)then
    write(6,*)"                                         ============="
    write(6,'(A,E14.8)')"                                   TOTAL : ",totalsum
  end if

  !-----------------------------------------------------------------
  ! Communication: element-wise vector reduction onto rank 0.

  inittime = MPI_Wtime()

  call MPI_Reduce(sendbuff,recvbuff,buffsize,MPI_REAL8,MPI_SUM, &
  &               0,MPI_COMM_WORLD,ierr)

  ! BUG FIX: the original stored the absolute wall-clock time here;
  ! the elapsed time is the difference from inittime (as in the C
  ! version of this example).
  totaltime = MPI_Wtime() - inittime

  !-----------------------------------------------------------------
  ! Print out after communication (recvbuff is only defined on the
  ! root rank, so only rank 0 inspects it).
  if(taskid.eq.0)then
    write(6,*)
    write(6,'(A)')"##########################################################"
    write(6,*)
    write(6,'(A)')"                --> AFTER COMMUNICATION <--"
    write(6,*)
    buffsum = sum(recvbuff)
    write(6,'(A,I3,A,E14.8)')"Task ",taskid,": Sum of recvbuff elements ",buffsum
    write(6,*)
    write(6,'(A)')"##########################################################"
    write(6,'(A,F5.2,A)')" Total communication time : ",totaltime," seconds"
    write(6,'(A)')"##########################################################"
    write(6,*)
  end if

  !-----------------------------------------------------------------
  ! Free the allocated memory.
  deallocate(sendbuff)
  deallocate(recvbuff)

  !-----------------------------------------------------------------
  ! MPI finalisation.

  call MPI_FINALIZE( ierr )

end program Example_MPI


In C

File : reduce.c
/*--------------------------------------------------------
 
            <--- sendbuff ---->
 
     T     *********************
         0 * A0 * B0 * .. * Z0 *
     a     *********************
         1 * A1 * B1 * .. * Z1 *
     s     *********************
         2 * A2 * B2 * .. * Z2 *
     k     *********************
         3 * A3 * B3 * .. * Z3 *
     s     *********************
         4 * A4 * B4 * .. * Z4 *
           *********************
 
            <--- recvbuff ---->
 
     T     **************************************************
         0 * A0+A1+..+A4 * B0+B1+..+B4 *  ..  * Z0+Z1+..+Z4 *
     a     **************************************************
         1 *             *             *  ..  *             *
     s     **************************************************
         2 *             *             *  ..  *             *
     k     **************************************************
         3 *             *             *  ..  *             *
     s     **************************************************
         4 *             *             *  ..  *             *
           **************************************************
 Author: Carol Gauthier
         Centre de calcul scientifique
         Université de Sherbrooke
 
 Last revision: September 2005
--------------------------------------------------------*/
 
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "math.h"
#include "mpi.h"
 
/*
 * Demonstrate MPI_Reduce: each rank fills sendbuff with uniform
 * random numbers in [0,1]; an element-wise MPI_SUM reduction collects
 * the result into recvbuff on rank 0.  Per-rank sums are printed
 * before and after communication for verification.
 *
 * Usage: reduce <vector size>
 */
int main(int argc,char** argv)
{
   /*===============================================================*/
   /* Declaration of variables                                      */
   int          taskid, ntasks;
   int          i;
   int          buffsize;
   double       *sendbuff,*recvbuff,buffsum,totalsum;
   double       inittime,totaltime;

   /*===============================================================*/
   /* MPI Initialisation. It is important to put this call at the   */
   /* beginning of the program, after variable declarations.        */
   MPI_Init(&argc, &argv);

   /*===============================================================*/
   /* Get the number of MPI processes and the taskid of this process.*/
   MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
   MPI_Comm_size(MPI_COMM_WORLD,&ntasks);

   /*===============================================================*/
   /* Get buffsize value from program arguments.                    */
   /* Guard against a missing argument (the original dereferenced   */
   /* argv[1] unconditionally and crashed when run without one).    */
   if ( argc < 2 ){
     if ( taskid == 0 ){
       fprintf(stderr,"Usage: %s <vector size>\n",argv[0]);
     }
     MPI_Finalize();
     return 1;
   }
   buffsize=atoi(argv[1]);

   /*===============================================================*/
   /* Printing out the description of the example.                  */
   if ( taskid == 0 ){
     printf("\n\n\n");
     printf("##########################################################\n\n");
     printf(" Example 12 \n\n");
     printf(" Collective Communication : MPI_Reduce \n\n");
     printf(" Vector size: %d\n",buffsize);
     printf(" Number of processes: %d\n\n",ntasks);
     printf("##########################################################\n\n");
     printf("                --> BEFORE COMMUNICATION <--\n\n");
   }

   /*=============================================================*/
   /* Memory allocation (abort cleanly if it fails).              */
   sendbuff=(double *)malloc(sizeof(double)*buffsize);
   recvbuff=(double *)malloc(sizeof(double)*buffsize);
   if ( sendbuff == NULL || recvbuff == NULL ){
     fprintf(stderr,"Process %d: memory allocation failed\n",taskid);
     MPI_Abort(MPI_COMM_WORLD,1);
   }

   /*=============================================================*/
   /* Vector initialisation: rank-dependent seed so each process  */
   /* draws a different random sequence.                          */
   srand((unsigned)time( NULL ) + taskid);
   for(i=0;i<buffsize;i++){
       sendbuff[i]=(double)rand()/RAND_MAX;
   }

   /*==============================================================*/
   /* Print out before communication.                              */

   MPI_Barrier(MPI_COMM_WORLD);

   buffsum=0.0;
   for(i=0;i<buffsize;i++){
     buffsum=buffsum+sendbuff[i];
   }
   printf(" Process %d : Sum of sendbuff elements = %e \n",taskid,buffsum);

   MPI_Barrier(MPI_COMM_WORLD);

   /* Scalar reduction: total of all per-rank sums on rank 0.       */
   MPI_Reduce(&buffsum,&totalsum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   if(taskid==0){
     /* BUG FIX: the original passed taskid and totalsum to a format */
     /* string with no conversion specifiers.                        */
     printf("                                            =============\n");
     printf("                                     TOTAL : %e \n\n",totalsum);
   }

   /*===============================================================*/
   /* Communication: element-wise vector reduction onto rank 0.     */

   inittime = MPI_Wtime();

   MPI_Reduce(sendbuff,recvbuff,buffsize,MPI_DOUBLE,MPI_SUM,
              0,MPI_COMM_WORLD);

   totaltime = MPI_Wtime() - inittime;

   /*===============================================================*/
   /* Print out after communication (recvbuff is only defined on    */
   /* the root rank, so only rank 0 inspects it).                   */
   if ( taskid == 0 ){
     printf("\n");
     printf("##########################################################\n\n");
     printf("                --> AFTER COMMUNICATION <-- \n\n");
     buffsum=0.0;
     for(i=0;i<buffsize;i++){
       buffsum=buffsum+recvbuff[i];
     }
     printf(" Process %d : Sum of recvbuff elements -> %e \n",taskid,buffsum);
     printf("\n");
     printf("##########################################################\n\n");
     printf(" Communication time : %f seconds\n\n",totaltime);
     printf("##########################################################\n\n");
   }

   /*===============================================================*/
   /* Free the allocated memory.                                    */
   free(recvbuff);
   free(sendbuff);

   /*===============================================================*/
   /* MPI finalisation.                                             */
   MPI_Finalize();

   return 0;
}


Outils personnels
Espaces de noms

Variantes
Actions
Navigation
Ressources de Calcul Québec
Outils
Partager