Tags: c, arrays, mpi, dynamic-memory-allocation

MPI with dynamically allocated arrays


I have a problem with dynamically allocated arrays.

If I use static allocation, this code runs without problems...

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define MASTER 0

int main (int argc, char *argv[]){

    int size, rank;

    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int lowerBound = 0, upperBound = 0, dimArrayTemp, x, z;
    int dimBulk = size - 1, nPart, cnt;


    FILE *pf;
    pf = fopen("in.txt","r");
    int array_value = fscanf(pf,"%d",&array_value);
    float ins_array_value;

    float *arrayTemp, *bulkSum,*s,*a;

    arrayTemp =(float*)malloc(array_value*sizeof(float));
    bulkSum = (float*)malloc(array_value*sizeof(float));
    s =(float*) malloc(array_value*sizeof(float));
    a =(float*) malloc(array_value*sizeof(float));

    int j=0;

    while(!feof(pf)){
        fscanf(pf,"%f",&ins_array_value);
        a[j] = ins_array_value;
        j++;
    }
    fclose(pf); 

    float presum, valFinal;

    if(size <= array_value){
        if (rank == MASTER){
            nPart = array_value/size; 
            int countPair;
            if((array_value % size) != 0){
                countPair = 0;
            }
            for (int i = 0; i < size; i++){

                if(i == 0){
                    lowerBound = upperBound;
                    upperBound += nPart - 1; 
                }
                else{
                    lowerBound += nPart;
                    upperBound += nPart;
                    if(countPair == 0 && i == size - 1)
                        upperBound = array_value - 1;
                }
                dimArrayTemp = upperBound - lowerBound;
                //float arrayTemp[dimArrayTemp];
                for( x = lowerBound, z = 0; x <= upperBound; x++, z++){
                    arrayTemp[z] = a[x];
                }
                if (i > 0){
                    //send array size
                    MPI_Send(&z,1,MPI_INT,i,0,MPI_COMM_WORLD);
                    //send value array
                    MPI_Send(arrayTemp,z,MPI_INT,i,1,MPI_COMM_WORLD);
                }
                else{

                    for (int h = 1;h <= dimArrayTemp; h++)
                        arrayTemp[h] = arrayTemp[h-1] + arrayTemp[h]; 
                    bulkSum[0] = arrayTemp[dimArrayTemp];
                    for (int h = 0; h <= dimArrayTemp; h++)
                        s[h] = arrayTemp[h];
                }

            }       
        }
        else{
            //receive array size
            MPI_Recv(&z,1,MPI_INT,0,0,MPI_COMM_WORLD, &status);

            MPI_Recv(arrayTemp,z,MPI_INT,0,1,MPI_COMM_WORLD,&status);
            for(int h = 1; h < z; h++){
                arrayTemp[h] = arrayTemp[h-1] + arrayTemp[h];
                presum = arrayTemp[h];
            }


            MPI_Send(&presum,1,MPI_INT,0,1,MPI_COMM_WORLD);
        }

        //MPI_Barrier(MPI_COMM_WORLD);
        if (rank == MASTER){

            for (int i = 1; i<size;i++){
                MPI_Recv(&presum,1,MPI_INT,i,1,MPI_COMM_WORLD,&status);
                bulkSum[i] = presum;
            }
            for (int i = 0; i<=dimBulk; i++){
                bulkSum[i] = bulkSum[i-1] +bulkSum[i];
            }
            for(int i = 0; i<dimBulk;i++){
                valFinal = bulkSum[i];
                cnt = i+1;
                MPI_Send(&valFinal,1,MPI_INT,cnt,1,MPI_COMM_WORLD);
            }
        }
        else{

            MPI_Recv(&valFinal,1,MPI_INT,0,1,MPI_COMM_WORLD,&status);
            for(int i = 0; i<z;i++){
                arrayTemp[i] = arrayTemp[i] + valFinal;
            }
            MPI_Send(arrayTemp,z,MPI_INT,0,1,MPI_COMM_WORLD);
        }

        if(rank == MASTER){
            for(int i =1;i<size;i++){
                MPI_Recv(arrayTemp,z,MPI_INT,i,1,MPI_COMM_WORLD,&status);
                for(int v=0, w =dimArrayTemp+1 ;v<z;v++, w++){
                    s[w] = arrayTemp[v];
                }   
                dimArrayTemp += z;
            }
            int count = 0;
            for(int c = 0;c<array_value;c++){
                printf("s[%d] = %f \n",count++,s[c]);
            }

        }
    }
    else{
        printf("ERROR!!!\t number of procs (%d) is higher than array size(%d)!\n", size, array_value);
        //fflush(stdout);
        MPI_Finalize();
    }
    free(arrayTemp);
    free(s);
    free(a);
    free(bulkSum);
    MPI_Finalize();
    return 0;   
}

Specifically, this is the declaration of the arrays:

float *arrayTemp, *bulkSum,*s,*a;

arrayTemp =(float*)malloc(array_value*sizeof(float));
bulkSum = (float*)malloc(array_value*sizeof(float));
s =(float*) malloc(array_value*sizeof(float));
a =(float*) malloc(array_value*sizeof(float));

Any ideas?

EDIT: I removed the references (&) to the arrays in MPI_Send() and MPI_Recv(), and the master condition around the allocations, but the same error still occurs: a process exited on signal 6 (Aborted).


Solution

  • This is a very common rookie mistake. One often sees MPI tutorials where variables are passed by address to MPI calls, e.g. MPI_Send(&a, ...);. The address-of operator (&) is used to get the address of the variable, and that address is passed to MPI as the buffer area for the operation. While & returns the address of the actual data storage for scalar variables and arrays, when applied to a pointer it returns the address of the pointer variable itself, i.e. the address where the pointed-to address is stored.

    The simplest solution is to stick to the following rule: never use & with arrays or dynamically allocated memory, e.g.:

    int a;
    MPI_Send(&a, ...);
    

    but

    int a[10];
    MPI_Send(a, ...);
    

    and

    int *a = malloc(10 * sizeof(int));
    MPI_Send(a, ...);
    
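    For contrast, the wrong pattern described above would look something like this (the destination rank and count here are arbitrary, just for illustration). Because a is a pointer, &a is the address of the pointer variable itself, so MPI would read whatever bytes happen to sit there instead of the allocated buffer:

    int *a = malloc(10 * sizeof(int));
    /* WRONG: &a points at the pointer variable, not at the malloc'ed data */
    MPI_Send(&a, 10, MPI_INT, 1, 0, MPI_COMM_WORLD);
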

    Also, as noted by @talonmies, you are only allocating the arrays in the master process. You should remove the conditional surrounding the allocation calls.
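
    Putting the two points together, here is a minimal sketch (not your program; the buffer size and contents are invented purely for illustration) that allocates on every rank and passes the pointers directly. It also uses MPI_FLOAT, since the buffers hold float values:

    #include <stdio.h>
    #include <stdlib.h>
    #include <mpi.h>

    int main(int argc, char *argv[])
    {
        int rank, size;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        int n = 8;                                /* chunk size, arbitrary for this sketch */
        float *chunk = malloc(n * sizeof(float)); /* allocated on EVERY rank, not only on the master */

        if (rank == 0) {
            for (int i = 0; i < n; i++)
                chunk[i] = (float)i;
            /* pass the pointer itself, never &chunk */
            for (int dest = 1; dest < size; dest++)
                MPI_Send(chunk, n, MPI_FLOAT, dest, 0, MPI_COMM_WORLD);
        } else {
            MPI_Recv(chunk, n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("rank %d received chunk[0] = %f\n", rank, chunk[0]);
        }

        free(chunk);
        MPI_Finalize();
        return 0;
    }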