fortran, mpi, intel-mpi

Get number of MPI Communicators in use


I have a large code that crashes with the following error:

Fatal error in PMPI_Comm_split: Other MPI error, error stack:
PMPI_Comm_split(532)................: MPI_Comm_split(comm=0xc4027cf0, color=0, key=0, new_comm=0x7ffdb50f2bd0) failed
PMPI_Comm_split(508)................: fail failed
MPIR_Comm_split_impl(260)...........: fail failed
MPIR_Get_contextid_sparse_group(676): Too many communicators (0/16384 free on this process; ignore_id=0)
Fatal error in PMPI_Comm_split: Other MPI error, error stack:
PMPI_Comm_split(532)................: MPI_Comm_split(comm=0xc401bcf1, color=1, key=0, new_comm=0x7ffed5aa4fd0) failed
PMPI_Comm_split(508)................: fail failed
MPIR_Comm_split_impl(260)...........: fail failed
MPIR_Get_contextid_sparse_group(676): Too many communicators (0/16384 free on this process; ignore_id=0)
Fatal error in PMPI_Comm_split: Other MPI error, error stack:
PMPI_Comm_split(532)................: MPI_Comm_split(comm=0xc4027ce9, color=0, key=0, new_comm=0x7ffe37e477d0) failed
PMPI_Comm_split(508)................: fail failed
MPIR_Comm_split_impl(260)...........: fail failed
MPIR_Get_contextid_sparse_group(676): Too many communicators (0/16384 free on this process; ignore_id=0)
Fatal error in PMPI_Comm_split: Other MPI error, error stack:
PMPI_Comm_split(532)................: MPI_Comm_split(comm=0xc401bcf1, color=1, key=0, new_comm=0x7ffd511ac4d0) failed
PMPI_Comm_split(508)................: fail failed
MPIR_Comm_split_impl(260)...........: fail failed
MPIR_Get_contextid_sparse_group(676): Too many communicators (0/16384 free on this process; ignore_id=0)

It seems there is some kind of MPI communicator leak. MPI itself appears to be aware of how many communicators are currently in use:

Too many communicators (0/16384 free on this process; ignore_id=0)

Is there a way to print the number of communicators used by MPI? This way I could narrow down where the communicators are leaking.


Solution

  • You can override the implementations of MPI_Comm_split and MPI_Comm_free to manually count the creation and destruction of communicators.

    Here is a simple example

    Overriding MPI_Comm_split and MPI_Comm_free

    #include "mpi.h"
    #include "stdio.h"
    static int comm_counter=0;
    int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm)
    {
          int world_rank;
          MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
          comm_counter++;
          printf("%s %i %s %i\n", "MPI_Comm_split ", comm_counter, " from ", world_rank);
          return PMPI_Comm_split(comm, color, key, newcomm);
    }
    
    int MPI_Comm_free(MPI_Comm *comm)
    {
          int world_rank;
          MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
          comm_counter--;
          printf("%s %i %s %i\n", "PMPI_Comm_free ", comm_counter, " from ", world_rank);
          return PMPI_Comm_free(comm);
    }
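
    /* Not in the original answer: communicators are also created by calls such
       as MPI_Comm_dup, MPI_Comm_create and MPI_Cart_create. If the leak does
       not show up in MPI_Comm_split, the same PMPI trick extends to those
       calls; for example, a sketch for MPI_Comm_dup sharing the same counter: */
    int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
    {
          int world_rank;
          MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
          comm_counter++;
          printf("%s %i %s %i\n", "MPI_Comm_dup ", comm_counter, " from ", world_rank);
          return PMPI_Comm_dup(comm, newcomm);
    }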
    

    Compile this code into an object file that will be linked in later. In my case I did mpicc -c comm_split.c -o comm_split.o

    Your application code itself is left untouched; you can use it with no other modifications.
    Here is a simple example of a main program that uses MPI_Comm_split and MPI_Comm_free.

    C++ case

    #include "mpi.h"
    int main()
    {
          MPI_Init(NULL, NULL);
          // Get the rank and size in the original communicator
          int world_rank, world_size;
          MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
          MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    
          int color = world_rank / 4; // Determine color based on row
    
          // Split the communicator based on the color and use the
          // original rank for ordering
          MPI_Comm row_comm, row_comm2;
          MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &row_comm);
          MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &row_comm2);
    
          int row_rank, row_size;
          MPI_Comm_rank(row_comm, &row_rank);
          MPI_Comm_size(row_comm, &row_size);
    
          printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n",
              world_rank, world_size, row_rank, row_size);
    
          MPI_Comm_free(&row_comm);
          MPI_Finalize();
    }
    

    Fortran case

          program test

          include "mpif.h"

          integer comm_world, new_comm, new_comm2, ierr
          integer world_rank
          integer color

          call MPI_INIT(ierr)

          comm_world = MPI_COMM_WORLD

          call MPI_Comm_rank(comm_world, world_rank, ierr)
          color = world_rank / 4
          call MPI_Comm_split(comm_world, color, world_rank, new_comm, ierr)
          call MPI_Comm_split(comm_world, color, world_rank,
         & new_comm2, ierr)

    c     new_comm2 is deliberately never freed: it plays the leaked communicator
          call MPI_Comm_free(new_comm, ierr)
          call MPI_Finalize(ierr)
          end program
    

    Compile and link your program together with the redefinitions of MPI_Comm_split and MPI_Comm_free:

    mpif77 test.f comm_split.o
    mpiCC test.cpp comm_split.o
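    # Run with a few ranks to watch the counters; the launcher name is an
    # assumption (use mpirun or mpiexec as provided by your MPI installation).
    mpirun -np 4 ./a.out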
    

    For the Fortran case you get something like

    MPI_Comm_split  1  from  3
    MPI_Comm_split  1  from  0
    MPI_Comm_split  1  from  1
    MPI_Comm_split  1  from  2
    MPI_Comm_split  2  from  0
    PMPI_Comm_free  1  from  0
    MPI_Comm_split  2  from  1
    PMPI_Comm_free  1  from  1
    MPI_Comm_split  2  from  2
    PMPI_Comm_free  1  from  2
    MPI_Comm_split  2  from  3
    PMPI_Comm_free  1  from  3
    

    This gives you information about the number of live communicators in each process, so you can spot where the count keeps growing without ever coming back down.
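
    If the per-call log is too noisy, the same profiling-interface idea can report just the final balance. The sketch below is not part of the original answer: it wraps MPI_Finalize and prints how many communicators each rank never freed, assuming it is added to the same comm_split.c so it can see comm_counter.

    int MPI_Finalize(void)
    {
          /* Report the per-process create/free balance before shutting MPI down. */
          int world_rank;
          MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
          if (comm_counter != 0)
                printf("rank %i still holds %i unfreed communicator(s)\n",
                    world_rank, comm_counter);
          return PMPI_Finalize();
    }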