
The Code

Here is the code itself. Below it I show how the program is compiled, installed, and run. It has to be run on 12 processes, one for each node of the 4 x 3 process grid it creates.

/*
 * $Id: cart.c,v 1.1 2003/10/06 00:56:48 gustav Exp $
 * 
 * $Log: cart.c,v $
 * Revision 1.1  2003/10/06 00:56:48  gustav
 * Initial revision
 *
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* for strlen */
#include <mpi.h>

#define FALSE 0
#define TRUE  1
#define MASTER_RANK 0

#define UPDOWN              0
#define SIDEWAYS            1
#define RIGHT               1
#define UP                  1

#define PROCESS_DIMENSIONS  2

#define PROCESS_ROWS        4
#define ROWS                4
#define DISPLAY_ROWS       16      /* must be PROCESS_ROWS * ROWS */

#define PROCESS_COLUMNS     3
#define COLUMNS             4
#define DISPLAY_COLUMNS    12      /* must be PROCESS_COLUMNS * COLUMNS */

/* Both functions are defined below main. */
void print_array ( int array [DISPLAY_ROWS] [DISPLAY_COLUMNS],
                   int vertical_break, int horizontal_break );
void collect_matrices ( MPI_Comm cartesian_communicator,
                        int my_cartesian_rank,
                        int matrix [ROWS] [COLUMNS], int tag );

int main ( int argc, char **argv )
{
  int pool_size, my_rank, destination, source;
  MPI_Status status;
  char char_buffer[BUFSIZ];
  int i_am_the_master = FALSE; 

  int divisions[PROCESS_DIMENSIONS] = {PROCESS_ROWS, PROCESS_COLUMNS};
  int periods[PROCESS_DIMENSIONS] = {0, 0};
  int reorder = 1;
  MPI_Comm cartesian_communicator;
  int my_cartesian_rank, my_coordinates[PROCESS_DIMENSIONS];
  int left_neighbour, right_neighbour, bottom_neighbour, top_neighbour;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &pool_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  if (my_rank == MASTER_RANK) i_am_the_master = TRUE;

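  /* Arrange the pool into a 4 x 3 (PROCESS_ROWS x PROCESS_COLUMNS)
     Cartesian grid.  The grid is not periodic in either dimension
     and, because reorder is 1, MPI is free to renumber the processes.
     Any processes that do not fit on the grid get MPI_COMM_NULL. */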
  MPI_Cart_create ( MPI_COMM_WORLD, PROCESS_DIMENSIONS, divisions, 
		    periods, reorder, &cartesian_communicator );

  if (cartesian_communicator != MPI_COMM_NULL) {

    int matrix [ROWS][COLUMNS];
    int i, j;
    MPI_Datatype column_type;

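    /* Establish where this process sits on the grid and who its four
       neighbours are.  MPI_Cart_shift along SIDEWAYS yields the left
       and right neighbours, along UPDOWN the bottom and top ones; at
       the edges of the non-periodic grid the missing neighbour is
       returned as MPI_PROC_NULL. */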
    MPI_Comm_rank ( cartesian_communicator, &my_cartesian_rank );
    MPI_Cart_coords ( cartesian_communicator, my_cartesian_rank,
		      PROCESS_DIMENSIONS, my_coordinates );
    MPI_Cart_shift ( cartesian_communicator, SIDEWAYS, RIGHT, 
		     &left_neighbour, &right_neighbour );
    MPI_Cart_shift ( cartesian_communicator, UPDOWN, UP,
		     &bottom_neighbour, &top_neighbour );

    if (! i_am_the_master ) {
      sprintf(char_buffer, "process %2d, cartesian %2d, \
coords (%2d,%2d), left %2d, right %2d, top %2d, bottom %2d",
	      my_rank, my_cartesian_rank, my_coordinates[0],
	      my_coordinates[1], left_neighbour, right_neighbour,
	      top_neighbour, bottom_neighbour);
      MPI_Send(char_buffer, strlen(char_buffer) + 1, MPI_CHAR,
	       MASTER_RANK, 3003, MPI_COMM_WORLD);
    } 
    else {

      int number_of_c_procs, count;

      number_of_c_procs = divisions[0] * divisions[1]; 
      for (count = 0; count < number_of_c_procs - 1; count++) {
	MPI_Recv(char_buffer, BUFSIZ, MPI_CHAR, MPI_ANY_SOURCE, 3003,
		 MPI_COMM_WORLD, &status);
	printf ("%s\n", char_buffer);
      }
      printf( "process %2d, cartesian %2d, \
coords (%2d,%2d), left %2d, right %2d, top %2d, bottom %2d\n",
	      my_rank, my_cartesian_rank, my_coordinates[0],
	      my_coordinates[1], left_neighbour, right_neighbour,
	      top_neighbour, bottom_neighbour);
    }

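    /* Initialize the local block: every entry is simply this
       process's Cartesian rank. */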
    for ( i = 0; i < ROWS; i++ ) {
      for ( j = 0; j < COLUMNS; j++ ) {
	matrix [i][j] = my_cartesian_rank;
      }
    }

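    /* Ship the local block to the master (tag 3003), which pastes
       all the blocks together and prints the initial picture. */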
    if (my_cartesian_rank != MASTER_RANK ) 
      MPI_Send ( matrix, COLUMNS * ROWS, MPI_INT, MASTER_RANK, 3003,
		 cartesian_communicator );
    else
      collect_matrices ( cartesian_communicator, my_cartesian_rank,
			 matrix, 3003 );

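    /* Up/down halo exchange: the second row from the top goes to the
       top neighbour while the matching row from the bottom neighbour
       lands in row 0, and vice versa.  Rows 0 and ROWS-1 act as the
       ghost rows; at the grid edges the partner is MPI_PROC_NULL and
       the operation quietly does nothing.  The result is collected
       and printed again under tag 6006. */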
    MPI_Sendrecv ( &matrix[ROWS - 2][0], COLUMNS, MPI_INT, 
		   top_neighbour, 4004,
		   &matrix[0][0], COLUMNS, MPI_INT, bottom_neighbour, 
		   4004, 
		   cartesian_communicator, &status );

    MPI_Sendrecv ( &matrix[1][0], COLUMNS, MPI_INT, bottom_neighbour,
		   5005,
		   &matrix[ROWS - 1][0], COLUMNS, MPI_INT, 
		   top_neighbour, 5005,
		   cartesian_communicator, &status );

    if (my_cartesian_rank != MASTER_RANK ) 
      MPI_Send ( matrix, COLUMNS * ROWS, MPI_INT, MASTER_RANK, 6006,
		 cartesian_communicator );
    else 
      collect_matrices ( cartesian_communicator, my_cartesian_rank,
			 matrix, 6006 );

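    /* A matrix column is not contiguous in memory: its ROWS entries
       lie COLUMNS ints apart.  column_type captures that strided
       layout, so one send of one column_type moves a whole column.
       The sideways halo exchange below mirrors the up/down one, with
       columns 0 and COLUMNS-1 as the ghost columns; the final
       picture is collected under tag 9009. */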
    MPI_Type_vector (ROWS, 1, COLUMNS, MPI_INT, &column_type);
    MPI_Type_commit (&column_type);

    MPI_Sendrecv ( &matrix[0][1], 1, column_type, left_neighbour, 7007,
		   &matrix[0][COLUMNS - 1], 1, column_type, 
		   right_neighbour, 7007,
		   cartesian_communicator, &status );
 
    MPI_Sendrecv ( &matrix[0][COLUMNS - 2], 1, column_type, 
		   right_neighbour, 8008,
		   &matrix[0][0], 1, column_type, left_neighbour, 8008,
		   cartesian_communicator, &status );

    if (my_cartesian_rank != MASTER_RANK )
      MPI_Send ( matrix, COLUMNS * ROWS, MPI_INT, MASTER_RANK, 9009,
		 cartesian_communicator );
    else
      collect_matrices ( cartesian_communicator, my_cartesian_rank,
			 matrix, 9009 );
  }

  MPI_Finalize ();
  exit(0);
}

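/* Print the assembled DISPLAY_ROWS x DISPLAY_COLUMNS picture, top
   row first, inserting blank space at the process-block
   boundaries. */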
void print_array (int array [DISPLAY_ROWS] [DISPLAY_COLUMNS], 
		  int vertical_break,
		  int horizontal_break)
{
  int k, l;

  printf ("\n");
  for (k = DISPLAY_ROWS - 1; k >= 0; k -- ) {
    for (l = 0; l < DISPLAY_COLUMNS; l ++ ) {
      if (l % horizontal_break == 0) printf (" ");
      printf ( "%2d ", array [k][l] );
    }
    printf ( "\n" );
    if (k % vertical_break == 0) printf ( "\n" );
  }
}

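/* The master receives a ROWS x COLUMNS block from every other
   process of the Cartesian communicator, pastes the blocks (and its
   own) into the display array according to their grid coordinates,
   and prints the result. */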
void collect_matrices (MPI_Comm cartesian_communicator, 
                       int my_cartesian_rank,
                       int matrix[ROWS][COLUMNS], 
                       int tag)
{
  int coordinates[PROCESS_DIMENSIONS];
  int client_matrix[ROWS][COLUMNS];
  int display[DISPLAY_ROWS][DISPLAY_COLUMNS];
  int i, j, k, l, source;
  MPI_Status status;
 
  for ( i = PROCESS_ROWS - 1; i >= 0; i -- ) {
    for ( j = 0; j < PROCESS_COLUMNS; j ++ ) {
      coordinates[0] = i;
      coordinates[1] = j;
      MPI_Cart_rank ( cartesian_communicator, coordinates,
		      &source );
      if (source != my_cartesian_rank) {
	MPI_Recv ( client_matrix, ROWS * COLUMNS, MPI_INT, source, tag,
		   cartesian_communicator, &status );
	for ( k = ROWS - 1; k >= 0; k -- ) {
	  for ( l = 0; l < COLUMNS; l++ ) {
	    display [i * ROWS + k] [j * COLUMNS + l] =
	      client_matrix[k][l];
	  }
	}
      }
      else {
	for ( k = ROWS - 1; k >= 0; k -- ) {
	  for ( l = 0; l < COLUMNS; l ++ ) {
	    display [i * ROWS + k] [j * COLUMNS + l]
	      = matrix[k][l];
	  }
	}
      }
    }
  }
 
  print_array (display, ROWS, COLUMNS);
}
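
The only really subtle point in the code above is the column_type datatype. A column of the matrix is not contiguous in C: its ROWS entries are spaced COLUMNS integers apart in memory. MPI_Type_vector (ROWS, 1, COLUMNS, MPI_INT, &column_type) describes exactly that strided layout, so sending one item of column_type starting at &matrix[0][1] picks up the whole second column. Here is a minimal standalone sketch of the idea; it is my illustration rather than part of the course code, and it should be run on 2 processes:

#include <stdio.h>
#include <mpi.h>

#define ROWS    4
#define COLUMNS 4

int main ( int argc, char **argv )
{
  int rank, i, j;
  int matrix[ROWS][COLUMNS];
  MPI_Datatype column_type;
  MPI_Status status;

  MPI_Init (&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  /* One column = ROWS blocks of 1 int each, stride of COLUMNS ints. */
  MPI_Type_vector (ROWS, 1, COLUMNS, MPI_INT, &column_type);
  MPI_Type_commit (&column_type);

  for (i = 0; i < ROWS; i++)
    for (j = 0; j < COLUMNS; j++)
      matrix[i][j] = (rank == 0) ? 10 * i + j : -1;

  if (rank == 0)         /* send column 1 of process 0's matrix ...  */
    MPI_Send (&matrix[0][1], 1, column_type, 1, 99, MPI_COMM_WORLD);
  else if (rank == 1) {  /* ... and deposit it in column 3 here.     */
    MPI_Recv (&matrix[0][3], 1, column_type, 0, 99, MPI_COMM_WORLD,
              &status);
    for (i = 0; i < ROWS; i++) {
      for (j = 0; j < COLUMNS; j++) printf ("%3d ", matrix[i][j]);
      printf ("\n");
    }
  }

  MPI_Type_free (&column_type);
  MPI_Finalize ();
  return 0;
}

Process 1 prints -1 everywhere except in column 3, where the values 1, 11, 21, 31 from process 0's column 1 appear.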

The Makefile for this program looks the same as the Makefile for the previous program: it calls mpicc to compile and link the code, and install to put the executable in place.
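The file itself is not reproduced in this section, but a minimal sketch consistent with the transcript below (the target and variable names are my reconstruction, not the original) might look like this:

# A guess at the Makefile, reconstructed from the transcript below.
# Recipe lines must be indented with tabs.
DESTDIR = /N/B/gustav/bin

cart: cart.c
	mpicc -o cart cart.c

install: cart
	install cart $(DESTDIR)

clobber:
	rm -f cart.o cart
	rcsclean

With the sources checked out of RCS, building and installing the program proceeds as follows: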

gustav@bh1 $ pwd
/N/B/gustav/src/I590/cart
gustav@bh1 $ make install
co  RCS/Makefile,v Makefile
RCS/Makefile,v  -->  Makefile
revision 1.1
done
co  RCS/cart.c,v cart.c
RCS/cart.c,v  -->  cart.c
revision 1.1
done
mpicc -o cart cart.c
install cart /N/B/gustav/bin
gustav@bh1 $ make clobber
rm -f cart.o cart
rcsclean
rm -f Makefile
rm -f cart.c
gustav@bh1 $

As I mentioned above, this program must be run on 12 processes, one per node of the 4 x 3 process grid. In the listing below a neighbour rank of -1 stands for MPI_PROC_NULL: the process sits on the edge of the non-periodic grid and has no neighbour in that direction, and any Sendrecv involving MPI_PROC_NULL quietly does nothing. The assembled matrix is printed three times: as initialized, after the up/down halo exchange, and after the sideways exchange. Here is how the run goes:

gustav@bh1 $ mpdboot
gustav@bh1 $ mpdtrace | wc
     13      13      64
gustav@bh1 $ mpdtrace
bh1
bc29
bc30
bc31
bc32
bc33
bc36
bc35
bc34
bc40
bc38
bc37
bc39
gustav@bh1 $ mpiexec -n 12 cart
process  1, cartesian  1, coords ( 0, 1), left  0, right  2, top  4, bottom -1
process 11, cartesian 11, coords ( 3, 2), left 10, right -1, top -1, bottom  8
process 10, cartesian 10, coords ( 3, 1), left  9, right 11, top -1, bottom  7
process  8, cartesian  8, coords ( 2, 2), left  7, right -1, top 11, bottom  5
process  9, cartesian  9, coords ( 3, 0), left -1, right 10, top -1, bottom  6
process  7, cartesian  7, coords ( 2, 1), left  6, right  8, top 10, bottom  4
process  6, cartesian  6, coords ( 2, 0), left -1, right  7, top  9, bottom  3
process  5, cartesian  5, coords ( 1, 2), left  4, right -1, top  8, bottom  2
process  4, cartesian  4, coords ( 1, 1), left  3, right  5, top  7, bottom  1
process  3, cartesian  3, coords ( 1, 0), left -1, right  4, top  6, bottom  0
process  2, cartesian  2, coords ( 0, 2), left  1, right -1, top  5, bottom -1
process  0, cartesian  0, coords ( 0, 0), left -1, right  1, top  3, bottom -1

  9  9  9  9  10 10 10 10  11 11 11 11 
  9  9  9  9  10 10 10 10  11 11 11 11 
  9  9  9  9  10 10 10 10  11 11 11 11 
  9  9  9  9  10 10 10 10  11 11 11 11 

  6  6  6  6   7  7  7  7   8  8  8  8 
  6  6  6  6   7  7  7  7   8  8  8  8 
  6  6  6  6   7  7  7  7   8  8  8  8 
  6  6  6  6   7  7  7  7   8  8  8  8 

  3  3  3  3   4  4  4  4   5  5  5  5 
  3  3  3  3   4  4  4  4   5  5  5  5 
  3  3  3  3   4  4  4  4   5  5  5  5 
  3  3  3  3   4  4  4  4   5  5  5  5 

  0  0  0  0   1  1  1  1   2  2  2  2 
  0  0  0  0   1  1  1  1   2  2  2  2 
  0  0  0  0   1  1  1  1   2  2  2  2 
  0  0  0  0   1  1  1  1   2  2  2  2 


  9  9  9  9  10 10 10 10  11 11 11 11 
  9  9  9  9  10 10 10 10  11 11 11 11 
  9  9  9  9  10 10 10 10  11 11 11 11 
  6  6  6  6   7  7  7  7   8  8  8  8 

  9  9  9  9  10 10 10 10  11 11 11 11 
  6  6  6  6   7  7  7  7   8  8  8  8 
  6  6  6  6   7  7  7  7   8  8  8  8 
  3  3  3  3   4  4  4  4   5  5  5  5 

  6  6  6  6   7  7  7  7   8  8  8  8 
  3  3  3  3   4  4  4  4   5  5  5  5 
  3  3  3  3   4  4  4  4   5  5  5  5 
  0  0  0  0   1  1  1  1   2  2  2  2 

  3  3  3  3   4  4  4  4   5  5  5  5 
  0  0  0  0   1  1  1  1   2  2  2  2 
  0  0  0  0   1  1  1  1   2  2  2  2 
  0  0  0  0   1  1  1  1   2  2  2  2 


  9  9  9 10   9 10 10 11  10 11 11 11 
  9  9  9 10   9 10 10 11  10 11 11 11 
  9  9  9 10   9 10 10 11  10 11 11 11 
  6  6  6  7   6  7  7  8   7  8  8  8 

  9  9  9 10   9 10 10 11  10 11 11 11 
  6  6  6  7   6  7  7  8   7  8  8  8 
  6  6  6  7   6  7  7  8   7  8  8  8 
  3  3  3  4   3  4  4  5   4  5  5  5 

  6  6  6  7   6  7  7  8   7  8  8  8 
  3  3  3  4   3  4  4  5   4  5  5  5 
  3  3  3  4   3  4  4  5   4  5  5  5 
  0  0  0  1   0  1  1  2   1  2  2  2 

  3  3  3  4   3  4  4  5   4  5  5  5 
  0  0  0  1   0  1  1  2   1  2  2  2 
  0  0  0  1   0  1  1  2   1  2  2  2 
  0  0  0  1   0  1  1  2   1  2  2  2 

gustav@bh1 $ mpdallexit
gustav@bh1 $



Zdzislaw Meglicki
2004-04-29