
OpenMPI Java Bindings Behavior Difference for lock, accumulate, get


I have a case where I need to use Java with MPI on our research cluster. One particular piece of functionality I need is nicely covered in this question (C++ code is included in the linked answer). I built the C++ code and it works exactly as intended.

I tried to build the Java equivalent of this code, but I have failed miserably. Even though I have functionally replicated what the C++ code does, the Java version does not consistently return the desired results.

mpiexec --oversubscribe -n 4 ./test

0 got counter 1
2 got counter 2
1 got counter 3
3 got counter 4
 1  1  1  1 

(Running with --oversubscribe on my local laptop.)

When I run my Java equivalent, I do not get anywhere near the same result:

mpirun --oversubscribe -n 4 java -cp .:/usr/local/lib/openmpi/mpi.jar CounterTest

0 got counter 1
3 got counter 1
1 got counter 3
2 got counter 2
 1  1  1  1 

I would expect each rank to get one and only one counter. In this run, counter 1 was handed out twice. Once in a blue moon I do get 1 through 4 (the order is unimportant; the uniqueness is).
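
For what it's worth, the property I'm checking for is simply that the values returned by increment() are all distinct across ranks. A sketch of the kind of check I have in mind (it would sit at the end of test1() shown further down, and assumes the array-based overload of Comm.gather):

// Gather every rank's counter at rank 0 and confirm the values are all distinct.
int[] mine = { result };              // 'result' is what c.increment(1) returned
int[] all  = new int[size];
MPI.COMM_WORLD.gather(mine, 1, MPI.INT, all, 1, MPI.INT, 0);
if (rank == 0) {
    long distinct = java.util.Arrays.stream(all).distinct().count();
    System.out.println(distinct == size ? "all counters unique" : "duplicate counters!");
}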

We run OpenMPI 2.1.0 on our cluster. On my local laptop I have OpenMPI 2.1.0 and 3.1.0 (current) installed, and I can reproduce both the correct behavior of the C++ program and the misbehavior of the Java program on either version.

Here is the Counter class I created:

import java.nio.ByteBuffer;
import java.util.ArrayList;

import mpi.MPI;
import mpi.MPIException;
import mpi.Win;

public class Counter {
    Win win;
    int hostRank;
    int myVal;
    ByteBuffer data;
    int rank;
    int size;

    public Counter(int hostRank) throws MPIException {
        this.setHostRank(hostRank);
        this.setSize(MPI.COMM_WORLD.getSize());
        this.setRank(MPI.COMM_WORLD.getRank());

        if (this.getRank() == hostRank) {
//          this.setData(MPI.newByteBuffer(this.getSize() * Integer.BYTES));
            this.setData(ByteBuffer.allocateDirect(this.getSize() * Integer.BYTES));
            for (int i = 0; i < this.getData().capacity(); i += Integer.BYTES)
                this.getData().putInt(i, 0);
        } else {
//          this.setData(MPI.newByteBuffer(0));
            this.setData(ByteBuffer.allocateDirect(0));
        }   

        this.setWin(new Win(this.getData(), this.getData().capacity(), Integer.BYTES,
                MPI.INFO_NULL, MPI.COMM_WORLD));

        this.setMyVal(0);
    }

    public int increment(int increment) throws MPIException {

        // A list to store all of the values we pull
        ArrayList<Integer> vals = new ArrayList<Integer>();
        for (int i = 0; i < this.getSize(); i++)
            vals.add(i, 0);

        // Need to convert the increment to a buffer
        ByteBuffer incrbuff = ByteBuffer.allocateDirect(Integer.BYTES);
        incrbuff.putInt(increment);

        // Our values are returned to us in a byte buffer
        ByteBuffer valBuff = ByteBuffer.allocateDirect(Integer.BYTES);

//      System.out.printf("Data for RANK %d: ", this.getRank());
        this.getWin().lock(MPI.LOCK_EXCLUSIVE, this.getHostRank(), 0);
        for (int i = 0; i < this.getSize(); i++) {
            // Always ensure that we're at the top of the buffer
            valBuff.position(0);
            if (i == this.getRank()) {
                this.getWin().accumulate(incrbuff, 1, MPI.INT, this.getHostRank(), i, 1, MPI.INT, MPI.SUM);
                // Without this, it comes back all 1s 
                this.getWin().flushLocalAll();
//              System.out.printf(" [%d] ", this.getMyVal() + increment);
            } else {
                this.getWin().get(valBuff, 1, MPI.INT, this.getHostRank(), i, 1, MPI.INT);
                vals.set(i, valBuff.getInt(0));
//              System.out.printf("  %d  ", vals.get(i))
            }
        }
        this.getWin().unlock(this.getHostRank());

        this.setMyVal(this.getMyVal() + increment);
        vals.set(this.getRank(), this.getMyVal());

//      System.out.printf(" <<%d>> \n", vals.stream().mapToInt(Integer::intValue).sum());
//      this.getWin().unlock(this.getHostRank());

        return vals.stream().mapToInt(Integer::intValue).sum();

    }

    public void printCounter() {
        if (this.getRank() == this.getHostRank()) {
            for (int i = 0; i < this.getSize(); i++) {
                System.out.printf(" %d ", this.getData().getInt());
            }
            System.out.println("");
        }
    }

    public void delete() throws MPIException {
        this.getWin().detach(this.getData());
        this.getWin().free();

        this.setData(null);
        this.setHostRank(0);
        this.setMyVal(0);
        this.setRank(0);
        this.setSize(0);
        this.setWin(null);

    }

    private Win getWin() {
        return win;
    }

    private void setWin(Win win) {
        this.win = win;
    }

    private int getHostRank() {
        return hostRank;
    }

    private void setHostRank(int hostrank) {
        this.hostRank = hostrank;
    }

    private int getMyVal() {
        return myVal;
    }

    private void setMyVal(int myval) {
        this.myVal = myval;
    }

    private ByteBuffer getData() {
        return data;
    }

    private void setData(ByteBuffer data) {
        this.data = data;
    }

    private int getRank() {
        return rank;
    }

    private void setRank(int rank) {
        this.rank = rank;
    }

    private int getSize() {
        return size;
    }

    private void setSize(int size) {
        this.size = size;
    }

}

It should also be noted that the Java code includes something that the C++ code does not:

this.getWin().flushLocalAll();

Without it, the counter comes back as "1" for every rank.

Here also is the first part of the test class:

import java.util.Random;

import mpi.*;

public class CounterTest {

    public static void main(String[] args) {

        try {
            MPI.Init(args);
        } catch (MPIException e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
        }

        try {
            test1();
//          test2();
        } catch (MPIException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }

        try {
            MPI.Finalize();
        } catch (MPIException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }

    }

    public static void test1 () throws MPIException {
        Counter c = new Counter(0);
        int rank = MPI.COMM_WORLD.getRank();
        int size = MPI.COMM_WORLD.getSize();

        int result = c.increment(1);
        System.out.printf("%d got counter %d\n", rank, result);

        MPI.COMM_WORLD.barrier();
        c.printCounter();
        c.delete();
        c = null;                       

    }
}

I've tried various other techniques, such as fencing and using a group so that I could call MPI_Win_start() and MPI_Win_complete(), but to no avail. I feel that this is as close to a true representation of the original C++ code as I can get.

What am I missing? Why does this not behave the same as the native C++ code?

EDIT: I also find that I need to add the following when running against the actual cluster (it was down for maintenance for the last two days):

this.getWin().flush(0);
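
For reference, my current mental model of the two calls I ended up needing is below. This is only a sketch of how I read the MPI-3 passive-target rules (using the Counter fields directly for brevity), not something taken from the bindings' documentation:

win.lock(MPI.LOCK_EXCLUSIVE, hostRank, 0);

win.accumulate(incrbuff, 1, MPI.INT, hostRank, rank, 1, MPI.INT, MPI.SUM);

// Local completion only: incrbuff may be reused by the caller after this,
// but the update is not necessarily visible in the window at hostRank yet.
win.flushLocalAll();

// Remote completion: after this, the accumulate has taken effect at hostRank.
win.flush(hostRank);

win.unlock(hostRank);   // completes anything still outstanding, at origin and target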

Solution

  • I think the issue is with these lines:

    this.getWin().get(valBuff, 1, MPI.INT, this.getHostRank(), i, 1, MPI.INT);
    vals.set(i, valBuff.getInt(0));
    

    My understanding is that you cannot assume the content of valBuff is correct before MPI_Win_unlock() has been called.

    I rewrote the method to use several buffers, setting vals only after MPI_Win_unlock(), and I was able to get correct output.

    public int increment(int increment) throws MPIException {
    
        // A list to store all of the values we pull
        ArrayList<Integer> vals = new ArrayList<Integer>();
        for (int i = 0; i < this.getSize(); i++)
            vals.add(i, 0);
    
        // Need to convert the increment to a buffer
        ByteBuffer incrbuff = ByteBuffer.allocateDirect(Integer.BYTES);
        incrbuff.putInt(increment);
    
        // Our values are returned to us in several byte buffers
        ByteBuffer valBuff[] = new ByteBuffer[this.getSize()];
    
        this.getWin().lock(MPI.LOCK_EXCLUSIVE, this.getHostRank(), 0);
        for (int i = 0; i < this.getSize(); i++) {
            // Accumulate into our own slot; get every other slot into its own buffer
            if (i == this.getRank()) {
                this.getWin().accumulate(incrbuff, 1, MPI.INT, this.getHostRank(), i, 1, MPI.INT, MPI.SUM);
            } else {
                valBuff[i] = ByteBuffer.allocateDirect(Integer.BYTES);
                valBuff[i].position(0);
                this.getWin().get(valBuff[i], 1, MPI.INT, this.getHostRank(), i, 1, MPI.INT);
            }
        }
        this.getWin().unlock(this.getHostRank());
        for (int i = 0; i < this.getSize(); i++) {
            if (i != this.getRank()) {
                vals.set(i, valBuff[i].getInt(0));
            }
        }
    
        this.setMyVal(this.getMyVal() + increment);
        vals.set(this.getRank(), this.getMyVal());
    
        return vals.stream().mapToInt(Integer::intValue).sum();
    
    }
    

    Note that there is no longer any need for

    this.getWin().flushLocalAll();
    this.getWin().flush(0);
    

    FWIW, I tried to use a single buffer of this.getSize() integers, but I was unable to get that to work.
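
    As a further aside: if your version of the bindings exposes Win.fetchAndOp (the MPI-3 MPI_Fetch_and_op; I believe Open MPI's mpi.Win does, but treat the exact signature below as an assumption), a single shared integer makes the per-rank get/accumulate bookkeeping unnecessary, since each rank can atomically fetch-and-add in one call. A sketch:

    // Sketch only: assumes the window on hostRank holds a single int, initialised to 0.
    ByteBuffer one = MPI.newByteBuffer(Integer.BYTES);
    one.putInt(0, 1);
    ByteBuffer old = MPI.newByteBuffer(Integer.BYTES);

    win.lock(MPI.LOCK_EXCLUSIVE, hostRank, 0);
    // Atomically add 1 to the remote counter and fetch its previous value.
    win.fetchAndOp(one, old, MPI.INT, hostRank, 0, MPI.SUM);
    win.unlock(hostRank);              // 'old' is only guaranteed valid after this

    int counter = old.getInt(0) + 1;   // each rank gets a unique, 1-based value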