Scatterv


This example uses MPI_Scatterv to distribute data to all available processes. It is similar to example 7, except that only a portion of each vector is transmitted. This gives finer control over the amount of data transferred, which helps reduce communication time.

Process 0 prepares one vector of real random numbers of size buffsize per process (one for each process, including process 0) and places them contiguously in memory in the vector sendbuff. A portion of each vector is then sent to the corresponding process using MPI_Scatterv.
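In this example the portion sent to process i contains the first buffsize/(i+1) elements of its vector; with buffsize = 8 and 4 processes, for instance, the portions are 8, 4, 2 and 2 elements long, read from displacements 0, 8, 16 and 24 in sendbuff.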

Before the communication, process 0 prints the sum of each vector for verification. After the communication, each process computes the sum of the portion it received; these sums are collected by process 0 with MPI_Gather and printed, along with the total communication time.
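
The heart of both listings, stripped of printing and timing, is the pattern below (a minimal C sketch; buffsize is hard-coded here for brevity, whereas the full examples read it from the command line):

#include <stdlib.h>
#include "mpi.h"

int main(int argc, char **argv)
{
   int taskid, ntasks, itask, recvcount;
   int buffsize = 1000;                 /* assumed vector size */
   int sendcounts[2048], displs[2048];  /* significant on the root only */
   double *sendbuff = NULL, *recvbuff;

   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
   MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

   recvbuff = (double *)malloc(sizeof(double) * buffsize);
   if (taskid == 0) {
      sendbuff = (double *)malloc(sizeof(double) * ntasks * buffsize);
      for (itask = 0; itask < ntasks; itask++) {
         displs[itask] = itask * buffsize;           /* start of each slot  */
         sendcounts[itask] = buffsize / (itask + 1); /* portion to transmit */
      }
      /* ... fill sendbuff here ... */
   }

   /* Every process knows the size of the portion it will receive. */
   recvcount = buffsize / (taskid + 1);

   /* Rank 0 sends sendcounts[i] elements, starting at displs[i], to rank i. */
   MPI_Scatterv(sendbuff, sendcounts, displs, MPI_DOUBLE,
                recvbuff, recvcount, MPI_DOUBLE,
                0, MPI_COMM_WORLD);

   free(recvbuff);
   if (taskid == 0) free(sendbuff);
   MPI_Finalize();
   return 0;
}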

In Fortran

File: scatterv.f
!--------------------------------------------------------
!   
!                 <----- sendbuff ------>
!   
!          ####################################
!          #      #      #      #      #      #
!        0 # ABCD # EFGH # IJKL # MNOP # QRST #
!          #      #      #      #      #      #
!          ####################################
!     T    #      #      #      #      #      #
!        1 #      #      #      #      #      #
!     a    #      #      #      #      #      #
!          ####################################
!     s    #      #      #      #      #      #
!        2 #      #      #      #      #      #       BEFORE
!     k    #      #      #      #      #      #
!          ####################################
!     s    #      #      #      #      #      #
!        3 #      #      #      #      #      #
!          #      #      #      #      #      #
!          ####################################
!          #      #      #      #      #      #
!        4 #      #      #      #      #      #
!          #      #      #      #      #      #
!          ####################################
!   
!          recvbuff
!   
!          ########
!          #      #
!        0 # AB   #
!          #      #
!          ########
!     T    #      #
!        1 # EFG  #
!     a    #      #
!          ########
!     s    #      #
!        2 # I    #                                   AFTER
!     k    #      #
!          ########
!     s    #      #
!        3 # MNOP #
!          #      #
!          ########
!          #      #
!        4 # QRS  #
!          #      #
!          ########
!
! Author: Carol Gauthier
!         Centre de calcul scientifique
!         Université de Sherbrooke
!
! Last revision: August 2004
!--------------------------------------------------------
 
Program Example_MPI
 
  include 'mpif.h'
  integer ierr,ntasks,taskid,itask,status(MPI_STATUS_SIZE)
  integer i,j,k,buffsize,displs(0:2048),sendcounts(0:2048),recvcount
  character argtmp*12
  real(8) inittime,recvtime,totaltime,rand,buffsum
 
  real(8),allocatable,dimension(:) :: buffsums
 
  real(8),allocatable,dimension(:,:) :: sendbuff
  real(8),allocatable,dimension(:) :: recvbuff
 
  !---------------------------------------------------------------
  ! MPI Initialisation. It's important to put this call at the
  ! beginning of the program, after variable declarations.
  call MPI_INIT( ierr )
 
  !---------------------------------------------------------------
  ! Get the number of MPI processes and the taskid of this process.
  call MPI_COMM_SIZE(MPI_COMM_WORLD,ntasks,ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD,taskid,ierr)
 
  !---------------------------------------------------------------
  ! Get buffsize value from program arguments.
  call getarg(1,argtmp)
  read(argtmp,'(I12)')buffsize
 
  !---------------------------------------------------------------
   ! Printing out the description of the example.
  if ( taskid.eq.0 )then
    write(6,'(A)')
    write(6,'(A)')"##########################################################"
    write(6,'(A)')
    write(6,'(A)')" Example 8"
    write(6,'(A)')
    write(6,'(A)')" Collective Communication : MPI_Scatterv"
    write(6,'(A)')
    write(6,'(A,I12)')" Vector size:",buffsize
    write(6,'(A,I5)')" Number of processes:",ntasks
    write(6,'(A)')
    write(6,'(A)')"##########################################################"
    write(6,'(A)')
    write(6,'(A)')"                --> BEFORE COMMUNICATION <--"
    write(6,'(A)')
  endif
 
   !---------------------------------------------------------------
   ! Memory allocation. 
   allocate( sendbuff(0:buffsize-1,0:ntasks-1) )
   allocate( recvbuff(0:buffsize-1) )
   allocate( buffsums(0:ntasks-1) )
 
   if ( taskid.eq.0 )then
 
    !-----------------------------------------------------------------
    ! Vectors and matrices initialisation.
    do itask=0,ntasks-1
      displs(itask)=itask*buffsize
      sendcounts(itask)=buffsize/(itask+1)
      do i=0,sendcounts(itask)-1
        sendbuff(i,itask)=rand()
      end do
    end do
 
   !-----------------------------------------------------------------
   ! Print out before communication.
   do itask=0,ntasks-1
      buffsum=0.0
      do i=0,sendcounts(itask)-1
        buffsum=buffsum+sendbuff(i,itask)
      end do
      write(6,'(A,I3,A,I3,A,E14.8,A,I9)')"Process",taskid,": Vector sent to ", & 
      &                              itask,": sum= ",buffsum," size=",sendcounts(itask)
   end do
 
  end if
 
  !-----------------------------------------------------------------
  ! Size of received vector.
  recvcount=buffsize/(taskid+1)
 
   !-----------------------------------------------------------------
   ! Communication
 
   inittime = MPI_Wtime()
 
   call MPI_Scatterv(sendbuff,sendcounts,displs,MPI_REAL8, &
   &                 recvbuff,recvcount,MPI_REAL8, &
   &                 0,MPI_COMM_WORLD,ierr)
 
   totaltime = MPI_Wtime() - inittime
 
   !-----------------------------------------------------------------
   ! Print out after communication.
 
   buffsum=0.0
   do i=0,recvcount-1
    buffsum=buffsum+recvbuff(i)
   end do
 
   call MPI_Gather(buffsum,1,MPI_REAL8,buffsums,1,MPI_REAL8, &
   &               0,MPI_COMM_WORLD,ierr)
 
   if(taskid.eq.0)then
    write(6,*)
    write(6,'(A)')"##########################################################"
    write(6,*)
    write(6,'(A)')"                --> AFTER COMMUNICATION <--"
    write(6,*)
    do itask=0,ntasks-1
      write(6,'(A,I3,A,E14.8)')"Process ",itask,": Sum of vector received = ",buffsums(itask)
    end do
    write(6,*)
    write(6,'(A)')"##########################################################"
    write(6,'(A,F5.2,A)')" Total communication time: ",totaltime," seconds"
    write(6,'(A)')"##########################################################"
    write(6,*)
   end if
 
 
  !-----------------------------------------------------------------
  ! Free the allocated memory
  deallocate(sendbuff)
  deallocate(recvbuff)
  deallocate(buffsums)
 
  !-----------------------------------------------------------------
  ! MPI finalisation
 
  call MPI_FINALIZE( ierr )
 
end


In C

File: scatterv.c
/*--------------------------------------------------------
 
                 <----- sendbuff ------>
 
          ####################################
          #      #      #      #      #      #
        0 # ABCD # EFGH # IJKL # MNOP # QRST #
          #      #      #      #      #      #
          ####################################
     T    #      #      #      #      #      #
        1 #      #      #      #      #      #
     a    #      #      #      #      #      #
          ####################################
     s    #      #      #      #      #      #
        2 #      #      #      #      #      #       BEFORE
     k    #      #      #      #      #      #
          ####################################
     s    #      #      #      #      #      #
        3 #      #      #      #      #      #
          #      #      #      #      #      #
          ####################################
          #      #      #      #      #      #
        4 #      #      #      #      #      #
          #      #      #      #      #      #
          ####################################
 
          recvbuff
 
          ########
          #      #
        0 # AB   #
          #      #
          ########
     T    #      #
        1 # EFG  #
     a    #      #
          ########
     s    #      #
        2 # I    #                                   AFTER
     k    #      #
          ########
     s    #      #
        3 # MNOP #
          #      #
          ########
          #      #
        4 # QRS  #
          #      #
          ########
 
 Author: Carol Gauthier
         Centre de calcul scientifique
         Université de Sherbrooke
 
 Last revision: September 2005
--------------------------------------------------------*/
 
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include "mpi.h"
 
int main(int argc,char** argv)
{
   /*===============================================================*/
   /* Declaration of variables                                     */
   int          taskid, ntasks;
   MPI_Status   status;
   int          ierr,i,j,itask;
   int	        buffsize,sendcounts[2048],displs[2048],recvcount;
   double       **sendbuff,*recvbuff,buffsum,buffsums[2048];
   double       inittime,totaltime,recvtime,recvtimes[2048];
 
   /*===============================================================*/
   /* MPI Initialisation. It is important to put this call at the   */
   /* beginning of the program, after variable declarations.        */
   MPI_Init(&argc, &argv);
 
   /*===============================================================*/
   /* Get the number of MPI processes and the taskid of this process. */
   MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
   MPI_Comm_size(MPI_COMM_WORLD,&ntasks);
 
   /*===============================================================*/
   /* Get buffsize value from program arguments.                    */
   buffsize=atoi(argv[1]);
 
   /*===============================================================*/
   /* Printing out the description of the example.                  */
   if ( taskid == 0 ){
     printf("\n\n\n");
     printf("##########################################################\n\n");
     printf(" Example 8 \n\n");
     printf(" Collective Communication : MPI_Scatterv \n\n");
     printf(" Vector size: %d\n",buffsize);
     printf(" Number of processes: %d\n\n",ntasks);
     printf("##########################################################\n\n");
     printf("                --> BEFORE COMMUNICATION <--\n\n");
   }
 
   /*=============================================================*/
   /* Memory allocation.                                          */ 
   recvbuff=(double *)malloc(sizeof(double)*buffsize);
   if ( taskid == 0 ){
     sendbuff=(double **)malloc(sizeof(double *)*ntasks);
     sendbuff[0]=(double *)malloc(sizeof(double)*ntasks*buffsize);
     for(i=1;i<ntasks;i++)sendbuff[i]=sendbuff[i-1]+buffsize;
   }
   else{
     sendbuff=(double **)malloc(sizeof(double *)*1);
     sendbuff[0]=(double *)malloc(sizeof(double)*1);
   }
 
   if ( taskid == 0 ){
 
     /*=============================================================*/
     /* Vectors and matrices initialisation.                        */
 
     srand((unsigned)time( NULL ) + taskid);
     for(itask=0;itask<ntasks;itask++){
       displs[itask]=itask*buffsize;
       sendcounts[itask]=buffsize/(itask+1);
       for(i=0;i<sendcounts[itask];i++){
         sendbuff[itask][i]=(double)rand()/RAND_MAX;
       }
     }
 
     /*==============================================================*/
     /* Print out before communication.                              */
     for(itask=0;itask<ntasks;itask++){
       buffsum=0.0;
       for(i=0;i<sendcounts[itask];i++){
         buffsum=buffsum+sendbuff[itask][i];
       }
       printf("Processus %d : Vecteur envoyé à %d : somme=%e taille= %d\n",
               taskid,itask,buffsum,sendcounts[itask]);
 
     }  
 
   }
 
   /*=================================================================*/
   /* Size of received vector.                                        */
   recvcount=buffsize/(taskid+1);
 
   /*===============================================================*/
   /* Communication.                                                */
 
   inittime = MPI_Wtime();
 
   ierr=MPI_Scatterv(sendbuff[0],sendcounts,displs,MPI_DOUBLE,
                     recvbuff,recvcount,MPI_DOUBLE,
                     0,MPI_COMM_WORLD);
 
   totaltime = MPI_Wtime() - inittime;
 
   /*===============================================================*/
   /* Print out after communication.                                */
 
   buffsum=0.0;
   for(i=0 ; i<recvcount ; i++){
     buffsum=buffsum+recvbuff[i];
   }
 
   ierr=MPI_Gather(&buffsum,1,MPI_DOUBLE,
                   buffsums,1, MPI_DOUBLE,
                   0,MPI_COMM_WORLD);
 
   if(taskid==0){
     printf("\n");
     printf("##########################################################\n\n");
     printf("                --> AFTER COMMUNICATION <-- \n\n");
     for(itask=0;itask<ntasks;itask++){
       printf("Process %d: Sum of received vector: %e\n",
               itask,buffsums[itask]);
     }  
     printf("\n");
     printf("##########################################################\n\n");
     printf(" Communication time : %f seconds\n\n",totaltime);  
     printf("##########################################################\n\n");
   }
 
   /*===============================================================*/
   /* Free the allocated memory.                                    */
   free(sendbuff[0]);
   free(sendbuff);
   free(recvbuff);
 
   /*===============================================================*/
   /* MPI finalisation.                                             */
   MPI_Finalize();
 
   return 0;
}
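
Both programs take the vector size (buffsize) as their single command-line argument. With a typical MPI installation they can be built with the wrapper compilers and launched with mpirun, for example as below (command and flag names vary between implementations, and because the Fortran source is free-form despite its .f extension, a flag such as gfortran's -ffree-form may be needed):

mpicc scatterv.c -o scatterv_c
mpif90 -ffree-form scatterv.f -o scatterv_f
mpirun -np 5 ./scatterv_c 100000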

