# 将2D数组的分布式块发送到MPI的根进程[英] Sending distributed chunks of a 2D array to the root process in MPI

### 问题描述

我有一个2D数组，该数组分布在一个MPI进程网格上（此示例中为3 x 2个进程）。数组的值是在持有该数组分块的各个进程中生成的，我想在根进程中将所有这些分块收集起来以便显示。

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
#include <array_alloc.h>
int main(int argc, char ** argv)
{
int size, rank;
int dim_size[2];
int periods[2];
int A = 2;
int B = 3;
MPI_Comm cart_comm;
MPI_Datatype block_type;
int coords[2];

float **array;
float **whole_array;

int n = 10;
int rows_per_core;
int cols_per_core;
int i, j;

int x_start, x_finish;
int y_start, y_finish;

/* Initialise MPI */
MPI_Init(&argc, &argv);

/* Get the rank for this process, and the number of processes */
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);

if (rank == 0)
{
/* If we're the master process */
whole_array = alloc_2d_float(n, n);

/* Initialise whole array to silly values */
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
whole_array[i][j] = 9999.99;
}
}

for (j = 0; j < n; j ++)
{
for (i = 0; i < n; i++)
{
printf("%f ", whole_array[j][i]);
}
printf("\n");
}
}

/* Create the cartesian communicator */
dim_size[0] = B;
dim_size[1] = A;
periods[0] = 1;
periods[1] = 1;

MPI_Cart_create(MPI_COMM_WORLD, 2, dim_size, periods, 1, &cart_comm);

/* Get our co-ordinates within that communicator */
MPI_Cart_coords(cart_comm, rank, 2, coords);

rows_per_core = ceil(n / (float) A);
cols_per_core = ceil(n / (float) B);

if (coords[0] == (B - 1))
{
/* We're at the far end of a row */
cols_per_core = n - (cols_per_core * (B - 1));
}
if (coords[1] == (A - 1))
{
/* We're at the bottom of a col */
rows_per_core = n - (rows_per_core * (A - 1));
}

printf("X: %d, Y: %d, RpC: %d, CpC: %d\n", coords[0], coords[1], rows_per_core, cols_per_core);

MPI_Type_vector(rows_per_core, cols_per_core, cols_per_core + 1, MPI_FLOAT, &block_type);
MPI_Type_commit(&block_type);

array = alloc_2d_float(rows_per_core, cols_per_core);

if (array == NULL)
{
printf("Problem with array allocation.\nExiting\n");
return 1;
}

for (j = 0; j < rows_per_core; j++)
{
for (i = 0; i < cols_per_core; i++)
{
array[j][i] = (float) (i + 1);
}
}

MPI_Barrier(MPI_COMM_WORLD);

MPI_Gather(array, 1, block_type, whole_array, 1, block_type, 0, MPI_COMM_WORLD);

/*
if (rank == 0)
{
for (j = 0; j < n; j ++)
{
for (i = 0; i < n; i++)
{
printf("%f ", whole_array[j][i]);
}
printf("\n");
}
}
*/
/* Close down the MPI environment */
MPI_Finalize();
}
```

```float **alloc_2d_float( int ndim1, int ndim2 ) {

float **array2 = malloc( ndim1 * sizeof( float * ) );

int i;

if( array2 != NULL ){

array2[0] = malloc( ndim1 * ndim2 * sizeof( float ) );

if( array2[ 0 ] != NULL ) {

for( i = 1; i < ndim1; i++ )
array2[i] = array2[0] + i * ndim2;

}

else {
free( array2 );
array2 = NULL;
}

}

return array2;

}
```

.

.