#include "../mpiwrap/cp_mpi.h"
#include "../offload/offload_mempool.h"

#define DBM_MULTIPLY_COMM_MEMPOOL
static int gcd(const int a, const int b) {
  if (a == 0)
    return b;
  return gcd(b % a, a); // Euclid's algorithm.
}

static int lcm(const int a, const int b) { return (a * b) / gcd(a, b); }
static inline int isum(const int n, const int input[n]) {
  int sum = 0;
  for (int i = 0; i < n; i++) {
    sum += input[i];
  }
  return sum;
}
static inline void icumsum(const int n, const int input[n], int output[n]) {
  output[0] = 0;
  for (int i = 1; i < n; i++) {
    output[i] = output[i - 1] + input[i - 1];
  }
}
static void create_pack_plans(const bool trans_matrix, const bool trans_dist,
                              const dbm_matrix_t *matrix,
                              const cp_mpi_comm_t comm,
                              const dbm_dist_1d_t *dist_indices,
                              const dbm_dist_1d_t *dist_ticks, const int nticks,
                              const int npacks, plan_t *plans_per_pack[npacks],
                              int nblks_per_pack[npacks],
                              int ndata_per_pack[npacks]) {

  memset(nblks_per_pack, 0, npacks * sizeof(int));
  memset(ndata_per_pack, 0, npacks * sizeof(int));

  // 1st pass: count the blocks destined for each pack (thread-local counts).
  int nblks_mythread[npacks];
  memset(nblks_mythread, 0, npacks * sizeof(int));
#pragma omp for schedule(static)
  /* ... loop over shards; shard and blk lookups elided in this excerpt ... */
  for (int iblock = 0; iblock < shard->nblocks; iblock++) {
    const int sum_index = (trans_matrix) ? blk->row : blk->col;
    const int itick = (1021 * sum_index) % nticks; // 1021 is a prime.
    const int ipack = itick / dist_ticks->nranks;
    nblks_mythread[ipack]++;
  }

  // Merge the thread-local counts into the global per-pack counts.
  for (int ipack = 0; ipack < npacks; ipack++) {
    nblks_per_pack[ipack] += nblks_mythread[ipack];
    nblks_mythread[ipack] = nblks_per_pack[ipack];
  }

  // Allocate one plan list per pack.
  for (int ipack = 0; ipack < npacks; ipack++) {
    const int nblks = nblks_per_pack[ipack];
    plans_per_pack[ipack] = malloc(nblks * sizeof(plan_t));
    assert(plans_per_pack[ipack] != NULL || nblks == 0);
  }

  // 2nd pass: fill the plans and count the data volume per pack.
  int ndata_mythread[npacks];
  memset(ndata_mythread, 0, npacks * sizeof(int));
#pragma omp for schedule(static)
  /* ... loop over shards; shard, blk, row_size, col_size elided ... */
  for (int iblock = 0; iblock < shard->nblocks; iblock++) {
    const int free_index = (trans_matrix) ? blk->col : blk->row;
    const int sum_index = (trans_matrix) ? blk->row : blk->col;
    const int itick = (1021 * sum_index) % nticks;
    const int ipack = itick / dist_ticks->nranks;
    // Compute the owning rank from the block's coordinates.
    const int coord_free_idx = dist_indices->index2coord[free_index];
    const int coord_sum_idx = itick % dist_ticks->nranks;
    const int coords[2] = {(trans_dist) ? coord_sum_idx : coord_free_idx,
                           (trans_dist) ? coord_free_idx : coord_sum_idx};
    const int rank = cp_mpi_cart_rank(comm, coords);
    ndata_mythread[ipack] += row_size * col_size;
    const int iplan = --nblks_mythread[ipack];
    plans_per_pack[ipack][iplan].blk = blk;
    plans_per_pack[ipack][iplan].rank = rank;
    plans_per_pack[ipack][iplan].row_size = row_size;
    plans_per_pack[ipack][iplan].col_size = col_size;
  }

  for (int ipack = 0; ipack < npacks; ipack++) {
    ndata_per_pack[ipack] += ndata_mythread[ipack];
  }
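The pack assignment above comes down to one hash and two integer divisions: itick = (1021 * sum_index) % nticks spreads the summation index over the ticks, itick / nranks selects the send pack, and itick % nranks selects the rank coordinate along the tick dimension. A minimal standalone sketch of that decomposition (the values of nranks and npacks are made up for illustration and are not taken from the DBM code):

#include <stdio.h>

int main(void) {
  const int nranks = 4, npacks = 3, nticks = nranks * npacks;
  for (int sum_index = 0; sum_index < 8; sum_index++) {
    const int itick = (1021 * sum_index) % nticks; // same hash as above
    const int ipack = itick / nranks;              // which send pack
    const int coord_sum_idx = itick % nranks;      // rank coordinate on ticks
    printf("sum_index=%d -> itick=%2d ipack=%d coord=%d\n", sum_index, itick,
           ipack, coord_sum_idx);
  }
  return 0;
}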
static void fill_send_buffers(
    const dbm_matrix_t *matrix, const bool trans_matrix, const int nblks_send,
    const int ndata_send, plan_t plans[nblks_send], const int nranks,
    int blks_send_count[nranks], int data_send_count[nranks],
    int blks_send_displ[nranks], int data_send_displ[nranks],
    dbm_pack_block_t blks_send[nblks_send], double data_send[ndata_send]) {

  memset(blks_send_count, 0, nranks * sizeof(int));
  memset(data_send_count, 0, nranks * sizeof(int));

  // 1st pass: count blocks and data volume per destination rank.
  int nblks_mythread[nranks], ndata_mythread[nranks];
  memset(nblks_mythread, 0, nranks * sizeof(int));
  memset(ndata_mythread, 0, nranks * sizeof(int));
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblks_send; iblock++) {
    const plan_t *plan = &plans[iblock];
    nblks_mythread[plan->rank] += 1;
    ndata_mythread[plan->rank] += plan->row_size * plan->col_size;
  }

  // Merge the thread-local counts into the global counts.
  for (int irank = 0; irank < nranks; irank++) {
    blks_send_count[irank] += nblks_mythread[irank];
    data_send_count[irank] += ndata_mythread[irank];
    nblks_mythread[irank] = blks_send_count[irank];
    ndata_mythread[irank] = data_send_count[irank];
  }

  // Turn the counts into displacements.
  icumsum(nranks, blks_send_count, blks_send_displ);
  icumsum(nranks, data_send_count, data_send_displ);
  const int m = nranks - 1;
  assert(nblks_send == blks_send_displ[m] + blks_send_count[m]);
  assert(ndata_send == data_send_displ[m] + data_send_count[m]);

  // 2nd pass: copy the block data into the send buffers.
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblks_send; iblock++) {
    const plan_t *plan = &plans[iblock];
    /* ... shard lookup elided in this excerpt ... */
    const dbm_block_t *blk = plan->blk;
    const double *blk_data = &shard->data[blk->offset];
    const int row_size = plan->row_size, col_size = plan->col_size;
    const int plan_size = row_size * col_size;
    const int irank = plan->rank;

    nblks_mythread[irank] -= 1;
    ndata_mythread[irank] -= plan_size;
    const int offset = data_send_displ[irank] + ndata_mythread[irank];
    const int jblock = blks_send_displ[irank] + nblks_mythread[irank];

    double norm = 0.0;
    if (trans_matrix) {
      // Copy the block with its two indices swapped, i.e. transposed.
      for (int i = 0; i < row_size; i++) {
        for (int j = 0; j < col_size; j++) {
          const double element = blk_data[j * row_size + i];
          data_send[offset + i * col_size + j] = element;
          norm += element * element;
        }
      }
      blks_send[jblock].free_index = plan->blk->col;
      blks_send[jblock].sum_index = plan->blk->row;
    } else {
      for (int i = 0; i < plan_size; i++) {
        const double element = blk_data[i];
        data_send[offset + i] = element;
        norm += element * element;
      }
      blks_send[jblock].free_index = plan->blk->row;
      blks_send[jblock].sum_index = plan->blk->col;
    }
    blks_send[jblock].norm = (float)norm;
    // Offsets are relative to the destination rank's chunk for now;
    // postprocess_received_blocks shifts them after the exchange.
    blks_send[jblock].offset = offset - data_send_displ[irank];
  }
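The transposed branch above swaps the fast and slow index while copying: the element read from blk_data[j * row_size + i] is written to data_send[offset + i * col_size + j], so the packed copy is the transpose of the stored block. A tiny standalone check of that index mapping; the block sizes and the assumed storage layout are illustrative only:

#include <assert.h>

int main(void) {
  const int row_size = 2, col_size = 3; // illustrative block dimensions
  // Assume element (i, j) lives at j * row_size + i, as in the read above.
  const double blk[6] = {1, 2, 3, 4, 5, 6};
  double packed[6];
  for (int i = 0; i < row_size; i++) {
    for (int j = 0; j < col_size; j++) {
      packed[i * col_size + j] = blk[j * row_size + i]; // same mapping as above
    }
  }
  // Each original row is now contiguous, i.e. the indices were swapped.
  assert(packed[0] == 1 && packed[1] == 3 && packed[2] == 5);
  assert(packed[3] == 2 && packed[4] == 4 && packed[5] == 6);
  return 0;
}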
static void postprocess_received_blocks(
    const int nranks, const int nshards, const int nblocks_recv,
    const int blks_recv_count[nranks], const int blks_recv_displ[nranks],
    const int data_recv_displ[nranks],
    dbm_pack_block_t blks_recv[nblocks_recv]) {

  int nblocks_per_shard[nshards], shard_start[nshards];
  memset(nblocks_per_shard, 0, nshards * sizeof(int));

  dbm_pack_block_t *blocks_tmp = malloc(nblocks_recv * sizeof(dbm_pack_block_t));
  assert(blocks_tmp != NULL || nblocks_recv == 0);

  // Convert rank-local offsets into offsets within the entire receive buffer.
  for (int irank = 0; irank < nranks; irank++) {
    for (int i = 0; i < blks_recv_count[irank]; i++) {
      blks_recv[blks_recv_displ[irank] + i].offset += data_recv_displ[irank];
    }
  }

  // Count received blocks per target shard (thread-local, then merged).
  int nblocks_mythread[nshards];
  memset(nblocks_mythread, 0, nshards * sizeof(int));
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblocks_recv; iblock++) {
    blocks_tmp[iblock] = blks_recv[iblock];
    const int ishard = blks_recv[iblock].free_index % nshards;
    nblocks_mythread[ishard]++;
  }

  for (int ishard = 0; ishard < nshards; ishard++) {
    nblocks_per_shard[ishard] += nblocks_mythread[ishard];
    nblocks_mythread[ishard] = nblocks_per_shard[ishard];
  }

  icumsum(nshards, nblocks_per_shard, shard_start);

  // Reorder the blocks by shard.
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblocks_recv; iblock++) {
    const int ishard = blocks_tmp[iblock].free_index % nshards;
    const int jblock = --nblocks_mythread[ishard] + shard_start[ishard];
    blks_recv[jblock] = blocks_tmp[iblock];
  }

  // Sort the blocks within each shard by their sum_index.
  for (int ishard = 0; ishard < nshards; ishard++) {
    if (nblocks_per_shard[ishard] > 1) {
      qsort(&blks_recv[shard_start[ishard]], nblocks_per_shard[ishard],
            sizeof(dbm_pack_block_t), &compare_pack_blocks_by_sum_index);
    }
  }
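The same three-step bucketing pattern recurs in create_pack_plans, fill_send_buffers, and postprocess_received_blocks above: count items per bucket (thread-locally), turn the counts into exclusive prefix sums, then place each item by decrementing its bucket's running counter. A serial, self-contained sketch of the pattern; all names and sizes here are illustrative and not part of the DBM code:

#include <stdio.h>
#include <string.h>

int main(void) {
  const int nitems = 8, nbuckets = 3;
  const int items[8] = {5, 2, 7, 0, 4, 9, 1, 6};

  // Step 1: count items per bucket.
  int count[3], start[3], fill[3];
  memset(count, 0, sizeof(count));
  for (int i = 0; i < nitems; i++) {
    count[items[i] % nbuckets]++;
  }

  // Step 2: exclusive prefix sums give each bucket's start offset.
  start[0] = 0;
  for (int b = 1; b < nbuckets; b++) {
    start[b] = start[b - 1] + count[b - 1];
  }

  // Step 3: place items by decrementing the running counters.
  int sorted[8];
  memcpy(fill, count, sizeof(count));
  for (int i = nitems - 1; i >= 0; i--) {
    const int b = items[i] % nbuckets;
    sorted[start[b] + --fill[b]] = items[i];
  }

  for (int i = 0; i < nitems; i++) {
    printf("%d ", sorted[i]); // items grouped by value % 3
  }
  printf("\n");
  return 0;
}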
static dbm_packed_matrix_t pack_matrix(const bool trans_matrix,
                                       const bool trans_dist,
                                       const dbm_matrix_t *matrix,
                                       const dbm_distribution_t *dist,
                                       const int nticks) {

  // Select the 1D distributions of the free and the summed dimension.
  const dbm_dist_1d_t *dist_indices = (trans_dist) ? &dist->cols : &dist->rows;
  const dbm_dist_1d_t *dist_ticks = (trans_dist) ? &dist->rows : &dist->cols;

  const int nsend_packs = nticks / dist_ticks->nranks;
  assert(nsend_packs * dist_ticks->nranks == nticks);

  /* ... allocation of the packed matrix struct elided in this excerpt ... */
  assert(packed.send_packs != NULL || nsend_packs == 0);

  // Plan all packs up front.
  plan_t *plans_per_pack[nsend_packs];
  int nblks_send_per_pack[nsend_packs], ndata_send_per_pack[nsend_packs];
  create_pack_plans(trans_matrix, trans_dist, matrix, dist->comm, dist_indices,
                    dist_ticks, nticks, nsend_packs, plans_per_pack,
                    nblks_send_per_pack, ndata_send_per_pack);

  // Size the send buffers for the largest pack.
  int nblks_send_max = 0, ndata_send_max = 0;
  for (int ipack = 0; ipack < nsend_packs; ++ipack) {
    nblks_send_max = imax(nblks_send_max, nblks_send_per_pack[ipack]);
    ndata_send_max = imax(ndata_send_max, ndata_send_per_pack[ipack]);
  }

  for (int ipack = 0; ipack < nsend_packs; ipack++) {
    // Fill the per-rank send buffers for this pack.
    // (blks_send, data_send, blks_recv, data_recv are allocated outside
    //  this excerpt.)
    const int nranks = dist->nranks;
    int blks_send_count[nranks], data_send_count[nranks];
    int blks_send_displ[nranks], data_send_displ[nranks];
    fill_send_buffers(matrix, trans_matrix, nblks_send_per_pack[ipack],
                      ndata_send_per_pack[ipack], plans_per_pack[ipack], nranks,
                      blks_send_count, data_send_count, blks_send_displ,
                      data_send_displ, blks_send, data_send);
    free(plans_per_pack[ipack]);

    // Exchange the per-rank block counts, then the block metadata.
    int blks_recv_count[nranks], blks_recv_displ[nranks];
    /* ... exchange of blks_send_count into blks_recv_count elided ... */
    icumsum(nranks, blks_recv_count, blks_recv_displ);
    const int nblocks_recv = isum(nranks, blks_recv_count);

    int blks_send_count_byte[nranks], blks_send_displ_byte[nranks];
    int blks_recv_count_byte[nranks], blks_recv_displ_byte[nranks];
    for (int i = 0; i < nranks; i++) {
      /* ... convert the counts and displacements to bytes ... */
    }
    cp_mpi_alltoallv_byte(blks_send, blks_send_count_byte, blks_send_displ_byte,
                          blks_recv, blks_recv_count_byte, blks_recv_displ_byte,
                          dist->comm);

    // Exchange the block data itself.
    int data_recv_count[nranks], data_recv_displ[nranks];
    /* ... exchange of data_send_count into data_recv_count elided ... */
    icumsum(nranks, data_recv_count, data_recv_displ);
    const int ndata_recv = isum(nranks, data_recv_count);
#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
    /* ... receive buffer taken from the offload memory pool ... */
#endif
    cp_mpi_alltoallv_double(data_send, data_send_count, data_send_displ,
                            data_recv, data_recv_count, data_recv_displ,
                            dist->comm);

    /* ... determination of nshards elided ... */
    postprocess_received_blocks(nranks, nshards, nblocks_recv, blks_recv_count,
                                blks_recv_displ, data_recv_displ, blks_recv);
  }

  // Size the shared receive buffers for the largest pack.
  int max_nblocks = 0, max_data_size = 0;
  for (int ipack = 0; ipack < packed.nsend_packs; ipack++) {
    /* ... */
  }
#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
  /* ... */
#endif
  // For this tick, work out which rank and pack to send, and which to receive.
  const int itick_of_rank0 = (itick + nticks - my_rank) % nticks;
  const int send_rank = (my_rank + nticks - itick_of_rank0) % nranks;
  const int send_itick = (itick_of_rank0 + send_rank) % nticks;
  const int send_ipack = send_itick / nranks;
  assert(send_itick % nranks == my_rank);

  const int recv_rank = itick % nranks;
  const int recv_ipack = itick / nranks;

  // When sending to ourselves the local pack can be used directly.
  if (send_rank == my_rank) {
    assert(send_rank == recv_rank && send_ipack == recv_ipack);
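With the arithmetic above, each tick pairs every rank with exactly one send partner and one receive partner. A small standalone check of that pairing; the tick and rank counts are made up, and the program only exercises the index arithmetic shown above:

#include <assert.h>
#include <stdio.h>

int main(void) {
  const int nranks = 4, nticks = 8; // illustrative; nticks % nranks == 0
  for (int itick = 0; itick < nticks; itick++) {
    for (int my_rank = 0; my_rank < nranks; my_rank++) {
      const int itick_of_rank0 = (itick + nticks - my_rank) % nticks;
      const int send_rank = (my_rank + nticks - itick_of_rank0) % nranks;
      const int send_itick = (itick_of_rank0 + send_rank) % nticks;
      assert(send_itick % nranks == my_rank); // same invariant as above
      const int recv_rank = itick % nranks;
      printf("tick %d: rank %d sends pack %d to rank %d, receives from %d\n",
             itick, my_rank, send_itick / nranks, send_rank, recv_rank);
    }
  }
  return 0;
}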
#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
  /* ... */
#endif

  for (int ipack = 0; ipack < packed->nsend_packs; ipack++) {
#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
    /* ... */
#endif
  }

  assert(iter != NULL);

  const int shifted_itick = (iter->itick + shift) % iter->nticks;
void cp_mpi_free_mem(void *mem)
Wrapper around MPI_Free_mem.
void cp_mpi_max_int(int *values, const int count, const cp_mpi_comm_t comm)
Wrapper around MPI_Allreduce for op MPI_MAX and datatype MPI_INT.
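The buffer sizing near the end of pack_matrix (max_nblocks, max_data_size) has to hold for the biggest pack on any rank, which is the kind of global maximum this wrapper provides. A hedged sketch of such a use; the helper name and the in-place semantics are assumptions based on the signature above, not taken from the DBM code:

// Sketch only: assumes "../mpiwrap/cp_mpi.h" and a valid communicator.
static void globalize_buffer_sizes(int *max_nblocks, int *max_data_size,
                                   const cp_mpi_comm_t comm) {
  int values[2] = {*max_nblocks, *max_data_size};
  cp_mpi_max_int(values, 2, comm); // MPI_Allreduce with MPI_MAX over MPI_INT
  *max_nblocks = values[0];
  *max_data_size = values[1];
}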
int cp_mpi_sendrecv_byte(const void *sendbuf, const int sendcount, const int dest, const int sendtag, void *recvbuf, const int recvcount, const int source, const int recvtag, const cp_mpi_comm_t comm)
Wrapper around MPI_Sendrecv for datatype MPI_BYTE.
void cp_mpi_alltoallv_double(const double *sendbuf, const int *sendcounts, const int *sdispls, double *recvbuf, const int *recvcounts, const int *rdispls, const cp_mpi_comm_t comm)
Wrapper around MPI_Alltoallv for datatype MPI_DOUBLE.
int cp_mpi_cart_rank(const cp_mpi_comm_t comm, const int coords[])
Wrapper around MPI_Cart_rank.
void * cp_mpi_alloc_mem(size_t size)
Wrapper around MPI_Alloc_mem.
void cp_mpi_alltoall_int(const int *sendbuf, const int sendcount, int *recvbuf, const int recvcount, const cp_mpi_comm_t comm)
Wrapper around MPI_Alltoall for datatype MPI_INT.
bool cp_mpi_comms_are_similar(const cp_mpi_comm_t comm1, const cp_mpi_comm_t comm2)
Wrapper around MPI_Comm_compare.
void cp_mpi_alltoallv_byte(const void *sendbuf, const int *sendcounts, const int *sdispls, void *recvbuf, const int *recvcounts, const int *rdispls, const cp_mpi_comm_t comm)
Wrapper around MPI_Alltoallv for datatype MPI_BYTE.
int cp_mpi_sendrecv_double(const double *sendbuf, const int sendcount, const int dest, const int sendtag, double *recvbuf, const int recvcount, const int source, const int recvtag, const cp_mpi_comm_t comm)
Wrapper around MPI_Sendrecv for datatype MPI_DOUBLE.
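pack_matrix combines these wrappers into the usual variable-size exchange pattern: counts go out with an all-to-all, displacements follow from icumsum, and the payload moves with an all-to-all-v. A hedged sketch of that pattern; the helper name and buffers are illustrative, and only the wrapper signatures above plus stdlib.h/assert.h and the isum/icumsum helpers from this file are assumed:

// Illustrative only: data_send and its counts/displacements are assumed to
// be filled already, as fill_send_buffers does for each pack.
static double *exchange_data(const double *data_send,
                             const int data_send_count[],
                             const int data_send_displ[], const int nranks,
                             const cp_mpi_comm_t comm, int *ndata_recv_out) {
  // 1) Tell every rank how many doubles it will receive from us.
  int data_recv_count[nranks], data_recv_displ[nranks];
  cp_mpi_alltoall_int(data_send_count, 1, data_recv_count, 1, comm);

  // 2) Receive displacements are the exclusive prefix sums of the counts.
  icumsum(nranks, data_recv_count, data_recv_displ);
  const int ndata_recv = isum(nranks, data_recv_count);

  // 3) Move the actual block data.
  double *data_recv = malloc(ndata_recv * sizeof(double));
  assert(data_recv != NULL || ndata_recv == 0);
  cp_mpi_alltoallv_double(data_send, data_send_count, data_send_displ,
                          data_recv, data_recv_count, data_recv_displ, comm);

  *ndata_recv_out = ndata_recv;
  return data_recv;
}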
static int imax(int x, int y)
Returns the larger of two given integers (missing from the C standard).
static int dbm_get_shard_index(const dbm_matrix_t *matrix, const int row, const int col)
Internal routine for getting a block's shard index.
static int dbm_get_num_shards(const dbm_matrix_t *matrix)
Internal routine that returns the number of shards for given matrix.
static void free_packed_matrix(dbm_packed_matrix_t *packed)
Private routine for releasing a packed matrix.
static void icumsum(const int n, const int input[n], int output[n])
Private routine for computing the cumulative sums of given numbers.
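A worked example: for counts {3, 1, 2} the exclusive cumulative sum is {0, 3, 4}, which is exactly the displacement array matching that count array, and isum of the same input gives the total of 6:

int counts[3] = {3, 1, 2}, displs[3];
icumsum(3, counts, displs);        // displs == {0, 3, 4}
const int total = isum(3, counts); // total == 6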
static void create_pack_plans(const bool trans_matrix, const bool trans_dist, const dbm_matrix_t *matrix, const cp_mpi_comm_t comm, const dbm_dist_1d_t *dist_indices, const dbm_dist_1d_t *dist_ticks, const int nticks, const int npacks, plan_t *plans_per_pack[npacks], int nblks_per_pack[npacks], int ndata_per_pack[npacks])
Private routine for planning packs.
static void postprocess_received_blocks(const int nranks, const int nshards, const int nblocks_recv, const int blks_recv_count[nranks], const int blks_recv_displ[nranks], const int data_recv_displ[nranks], dbm_pack_block_t blks_recv[nblocks_recv])
Private routine for post-processing received blocks.
dbm_comm_iterator_t * dbm_comm_iterator_start(const bool transa, const bool transb, const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b, const dbm_matrix_t *matrix_c)
Internal routine for creating a communication iterator.
static void fill_send_buffers(const dbm_matrix_t *matrix, const bool trans_matrix, const int nblks_send, const int ndata_send, plan_t plans[nblks_send], const int nranks, int blks_send_count[nranks], int data_send_count[nranks], int blks_send_displ[nranks], int data_send_displ[nranks], dbm_pack_block_t blks_send[nblks_send], double data_send[ndata_send])
Private routine for filling send buffers.
void dbm_comm_iterator_stop(dbm_comm_iterator_t *iter)
Internal routine for releasing the given communication iterator.
static dbm_pack_t * sendrecv_pack(const int itick, const int nticks, dbm_packed_matrix_t *packed)
Private routine for sending and receiving the pack for the given tick.
static int compare_pack_blocks_by_sum_index(const void *a, const void *b)
Private comparator passed to qsort to compare two blocks by sum_index.
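A comparator of this kind only has to order dbm_pack_block_t entries by their sum_index. A minimal sketch following the usual qsort contract; the actual implementation may differ in details:

static int compare_pack_blocks_by_sum_index(const void *a, const void *b) {
  const dbm_pack_block_t *blk_a = (const dbm_pack_block_t *)a;
  const dbm_pack_block_t *blk_b = (const dbm_pack_block_t *)b;
  // Negative, zero, or positive, as qsort expects.
  return blk_a->sum_index - blk_b->sum_index;
}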
bool dbm_comm_iterator_next(dbm_comm_iterator_t *iter, dbm_pack_t **pack_a, dbm_pack_t **pack_b)
Internal routine for retrieving the next pair of packs from the given iterator.
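Taken together with dbm_comm_iterator_start and dbm_comm_iterator_stop above, the intended calling pattern is a simple drain loop. A hedged sketch of how a caller might use it; the matrices and transpose flags are assumed to be set up elsewhere:

// Sketch of the calling pattern based on the signatures documented above.
dbm_comm_iterator_t *iter =
    dbm_comm_iterator_start(transa, transb, matrix_a, matrix_b, matrix_c);
dbm_pack_t *pack_a, *pack_b;
while (dbm_comm_iterator_next(iter, &pack_a, &pack_b)) {
  // Multiply the local shards with the received packs pack_a and pack_b.
}
dbm_comm_iterator_stop(iter);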
static dbm_packed_matrix_t pack_matrix(const bool trans_matrix, const bool trans_dist, const dbm_matrix_t *matrix, const dbm_distribution_t *dist, const int nticks)
Private routine for redistributing a matrix along selected dimensions.
static int isum(const int n, const int input[n])
Private routine for computing the sum of the given integers.
void offload_mempool_host_free(const void *memory)
Internal routine for releasing memory back to the pool.
void * offload_mempool_host_malloc(const size_t size)
Internal routine for allocating host memory from the pool.
Internal struct for storing a block's metadata.
Internal struct for storing a communication iterator.
dbm_packed_matrix_t packed_a
dbm_distribution_t * dist
dbm_packed_matrix_t packed_b
Internal struct for storing a one dimensional distribution.
Internal struct for storing a two dimensional distribution.
Internal struct for storing a matrix.
dbm_distribution_t * dist
Internal struct for storing a dbm_block_t plus its norm.
Internal struct for storing a pack - essentially a shard for MPI.
dbm_pack_block_t * blocks
Internal struct for storing a packed matrix.
const dbm_dist_1d_t * dist_ticks
const dbm_dist_1d_t * dist_indices
Internal struct for storing a matrix shard.
Private struct used for planning during pack_matrix.