8#include "../mpiwrap/cp_mpi.h"
9#include "../offload/offload_mempool.h"
16#define DBM_MULTIPLY_COMM_MEMPOOL
23static int gcd(
const int a,
const int b) {
/*
 * Returns the least common multiple of two integers.
 *
 * Computes a * b / gcd(a, b). The product is formed in 64-bit
 * arithmetic to avoid signed int overflow (undefined behavior)
 * for large inputs whose product exceeds INT_MAX, while the
 * final result is still returned as int for existing callers.
 */
static int lcm(const int a, const int b) {
  // Divide the 64-bit product by the gcd before narrowing back to int.
  return (int)(((long long)a * (long long)b) / (long long)gcd(a, b));
}
40static inline int isum(
const int n,
const int input[n]) {
42 for (
int i = 0;
i < n;
i++) {
52static inline void icumsum(
const int n,
const int input[n],
int output[n]) {
54 for (
int i = 1;
i < n;
i++) {
55 output[
i] = output[
i - 1] + input[
i - 1];
77 return ((
unsigned long long)sum_index * 1021ULL) % (
unsigned long long)nticks;
89 const int npacks,
plan_t *plans_per_pack[npacks],
90 int nblks_per_pack[npacks],
91 int ndata_per_pack[npacks]) {
93 memset(nblks_per_pack, 0, npacks *
sizeof(
int));
94 memset(ndata_per_pack, 0, npacks *
sizeof(
int));
99 int nblks_mythread[npacks];
100 memset(nblks_mythread, 0, npacks *
sizeof(
int));
101#pragma omp for schedule(static)
104 for (
int iblock = 0; iblock < shard->
nblocks; iblock++) {
106 const int sum_index = (trans_matrix) ? blk->
row : blk->
col;
108 const int ipack = itick64 / dist_ticks->
nranks;
109 nblks_mythread[ipack]++;
115 for (
int ipack = 0; ipack < npacks; ipack++) {
116 nblks_per_pack[ipack] += nblks_mythread[ipack];
117 nblks_mythread[ipack] = nblks_per_pack[ipack];
121 for (
int ipack = 0; ipack < npacks; ipack++) {
122 const int nblks = nblks_per_pack[ipack];
123 plans_per_pack[ipack] = malloc(nblks *
sizeof(
plan_t));
124 assert(plans_per_pack[ipack] != NULL || nblks == 0);
128 int ndata_mythread[npacks];
129 memset(ndata_mythread, 0, npacks *
sizeof(
int));
130#pragma omp for schedule(static)
133 for (
int iblock = 0; iblock < shard->
nblocks; iblock++) {
135 const int free_index = (trans_matrix) ? blk->
col : blk->
row;
136 const int sum_index = (trans_matrix) ? blk->
row : blk->
col;
138 const int ipack = itick64 / dist_ticks->
nranks;
140 const int coord_free_idx = dist_indices->
index2coord[free_index];
141 const int coord_sum_idx = itick64 % dist_ticks->
nranks;
142 const int coords[2] = {(trans_dist) ? coord_sum_idx : coord_free_idx,
143 (trans_dist) ? coord_free_idx : coord_sum_idx};
147 ndata_mythread[ipack] += row_size * col_size;
149 const int iplan = --nblks_mythread[ipack];
150 plans_per_pack[ipack][iplan].blk = blk;
151 plans_per_pack[ipack][iplan].rank = rank;
152 plans_per_pack[ipack][iplan].row_size = row_size;
153 plans_per_pack[ipack][iplan].col_size = col_size;
157 for (
int ipack = 0; ipack < npacks; ipack++) {
158 ndata_per_pack[ipack] += ndata_mythread[ipack];
168 const dbm_matrix_t *matrix,
const bool trans_matrix,
const int nblks_send,
169 const int ndata_send,
plan_t plans[nblks_send],
const int nranks,
170 int blks_send_count[nranks],
int data_send_count[nranks],
171 int blks_send_displ[nranks],
int data_send_displ[nranks],
174 memset(blks_send_count, 0, nranks *
sizeof(
int));
175 memset(data_send_count, 0, nranks *
sizeof(
int));
180 int nblks_mythread[nranks], ndata_mythread[nranks];
181 memset(nblks_mythread, 0, nranks *
sizeof(
int));
182 memset(ndata_mythread, 0, nranks *
sizeof(
int));
183#pragma omp for schedule(static)
184 for (
int iblock = 0; iblock < nblks_send; iblock++) {
185 const plan_t *plan = &plans[iblock];
186 nblks_mythread[plan->
rank] += 1;
192 for (
int irank = 0; irank < nranks; irank++) {
193 blks_send_count[irank] += nblks_mythread[irank];
194 data_send_count[irank] += ndata_mythread[irank];
195 nblks_mythread[irank] = blks_send_count[irank];
196 ndata_mythread[irank] = data_send_count[irank];
203 icumsum(nranks, blks_send_count, blks_send_displ);
204 icumsum(nranks, data_send_count, data_send_displ);
205 const int m = nranks - 1;
206 assert(nblks_send == blks_send_displ[m] + blks_send_count[m]);
207 assert(ndata_send == data_send_displ[m] + data_send_count[m]);
212#pragma omp for schedule(static)
213 for (
int iblock = 0; iblock < nblks_send; iblock++) {
214 const plan_t *plan = &plans[iblock];
218 const double *blk_data = &shard->
data[blk->
offset];
220 const int plan_size = row_size * col_size;
221 const int irank = plan->
rank;
226 nblks_mythread[irank] -= 1;
227 ndata_mythread[irank] -= plan_size;
228 const int offset = data_send_displ[irank] + ndata_mythread[irank];
229 const int jblock = blks_send_displ[irank] + nblks_mythread[irank];
234 for (
int i = 0;
i < row_size;
i++) {
235 for (
int j = 0; j < col_size; j++) {
236 const double element = blk_data[j * row_size +
i];
237 data_send[offset +
i * col_size + j] = element;
238 norm += element * element;
241 blks_send[jblock].free_index = plan->
blk->
col;
242 blks_send[jblock].sum_index = plan->
blk->
row;
244 for (
int i = 0;
i < plan_size;
i++) {
245 const double element = blk_data[
i];
246 data_send[offset +
i] = element;
247 norm += element * element;
249 blks_send[jblock].free_index = plan->
blk->
row;
250 blks_send[jblock].sum_index = plan->
blk->
col;
252 blks_send[jblock].norm = (float)norm;
255 blks_send[jblock].offset = offset - data_send_displ[irank];
275 const int nranks,
const int nshards,
const int nblocks_recv,
276 const int blks_recv_count[nranks],
const int blks_recv_displ[nranks],
277 const int data_recv_displ[nranks],
280 int nblocks_per_shard[nshards], shard_start[nshards];
281 memset(nblocks_per_shard, 0, nshards *
sizeof(
int));
284 assert(blocks_tmp != NULL || nblocks_recv == 0);
289 for (
int irank = 0; irank < nranks; irank++) {
291 for (
int i = 0;
i < blks_recv_count[irank];
i++) {
292 blks_recv[blks_recv_displ[irank] +
i].offset += data_recv_displ[irank];
297 int nblocks_mythread[nshards];
298 memset(nblocks_mythread, 0, nshards *
sizeof(
int));
299#pragma omp for schedule(static)
300 for (
int iblock = 0; iblock < nblocks_recv; iblock++) {
301 blocks_tmp[iblock] = blks_recv[iblock];
302 const int ishard = blks_recv[iblock].
free_index % nshards;
303 nblocks_mythread[ishard]++;
306 for (
int ishard = 0; ishard < nshards; ishard++) {
307 nblocks_per_shard[ishard] += nblocks_mythread[ishard];
308 nblocks_mythread[ishard] = nblocks_per_shard[ishard];
312 icumsum(nshards, nblocks_per_shard, shard_start);
314#pragma omp for schedule(static)
315 for (
int iblock = 0; iblock < nblocks_recv; iblock++) {
316 const int ishard = blocks_tmp[iblock].
free_index % nshards;
317 const int jblock = --nblocks_mythread[ishard] + shard_start[ishard];
318 blks_recv[jblock] = blocks_tmp[iblock];
323 for (
int ishard = 0; ishard < nshards; ishard++) {
324 if (nblocks_per_shard[ishard] > 1) {
325 qsort(&blks_recv[shard_start[ishard]], nblocks_per_shard[ishard],
339 const bool trans_dist,
348 const dbm_dist_1d_t *dist_indices = (trans_dist) ? &dist->cols : &dist->rows;
349 const dbm_dist_1d_t *dist_ticks = (trans_dist) ? &dist->rows : &dist->cols;
352 const int nsend_packs = nticks / dist_ticks->
nranks;
353 assert(nsend_packs * dist_ticks->
nranks == nticks);
359 assert(packed.
send_packs != NULL || nsend_packs == 0);
362 plan_t *plans_per_pack[nsend_packs];
363 int nblks_send_per_pack[nsend_packs], ndata_send_per_pack[nsend_packs];
365 dist_ticks, nticks, nsend_packs, plans_per_pack,
366 nblks_send_per_pack, ndata_send_per_pack);
369 int nblks_send_max = 0, ndata_send_max = 0;
370 for (
int ipack = 0; ipack < nsend_packs; ++ipack) {
371 nblks_send_max =
imax(nblks_send_max, nblks_send_per_pack[ipack]);
372 ndata_send_max =
imax(ndata_send_max, ndata_send_per_pack[ipack]);
379 for (
int ipack = 0; ipack < nsend_packs; ipack++) {
381 const int nranks = dist->nranks;
382 int blks_send_count[nranks], data_send_count[nranks];
383 int blks_send_displ[nranks], data_send_displ[nranks];
385 ndata_send_per_pack[ipack], plans_per_pack[ipack], nranks,
386 blks_send_count, data_send_count, blks_send_displ,
387 data_send_displ, blks_send, data_send);
388 free(plans_per_pack[ipack]);
391 int blks_recv_count[nranks], blks_recv_displ[nranks];
393 icumsum(nranks, blks_recv_count, blks_recv_displ);
394 const int nblocks_recv =
isum(nranks, blks_recv_count);
399 int blks_send_count_byte[nranks], blks_send_displ_byte[nranks];
400 int blks_recv_count_byte[nranks], blks_recv_displ_byte[nranks];
401 for (
int i = 0;
i < nranks;
i++) {
408 blks_recv, blks_recv_count_byte, blks_recv_displ_byte,
413 int data_recv_count[nranks], data_recv_displ[nranks];
415 icumsum(nranks, data_recv_count, data_recv_displ);
416 const int ndata_recv =
isum(nranks, data_recv_count);
419#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
426 data_recv, data_recv_count, data_recv_displ,
431 blks_recv_count, blks_recv_displ,
432 data_recv_displ, blks_recv);
444 int max_nblocks = 0, max_data_size = 0;
445 for (
int ipack = 0; ipack < packed.
nsend_packs; ipack++) {
455#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
476 const int itick_of_rank0 = (itick + nticks - my_rank) % nticks;
477 const int send_rank = (my_rank + nticks - itick_of_rank0) % nranks;
478 const int send_itick = (itick_of_rank0 + send_rank) % nticks;
479 const int send_ipack = send_itick / nranks;
480 assert(send_itick % nranks == my_rank);
483 const int recv_rank = itick % nranks;
484 const int recv_ipack = itick / nranks;
487 if (send_rank == my_rank) {
488 assert(send_rank == recv_rank && send_ipack == recv_ipack);
528#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
533 for (
int ipack = 0; ipack < packed->
nsend_packs; ipack++) {
535#if defined(DBM_MULTIPLY_COMM_MEMPOOL)
555 assert(iter != NULL);
585 const int shifted_itick = (iter->
itick + shift) % iter->
nticks;
void cp_mpi_free_mem(void *mem)
Wrapper around MPI_Free_mem.
void cp_mpi_max_int(int *values, const int count, const cp_mpi_comm_t comm)
Wrapper around MPI_Allreduce for op MPI_MAX and datatype MPI_INT.
int cp_mpi_sendrecv_byte(const void *sendbuf, const int sendcount, const int dest, const int sendtag, void *recvbuf, const int recvcount, const int source, const int recvtag, const cp_mpi_comm_t comm)
Wrapper around MPI_Sendrecv for datatype MPI_BYTE.
void cp_mpi_alltoallv_double(const double *sendbuf, const int *sendcounts, const int *sdispls, double *recvbuf, const int *recvcounts, const int *rdispls, const cp_mpi_comm_t comm)
Wrapper around MPI_Alltoallv for datatype MPI_DOUBLE.
int cp_mpi_cart_rank(const cp_mpi_comm_t comm, const int coords[])
Wrapper around MPI_Cart_rank.
void * cp_mpi_alloc_mem(size_t size)
Wrapper around MPI_Alloc_mem.
void cp_mpi_alltoall_int(const int *sendbuf, const int sendcount, int *recvbuf, const int recvcount, const cp_mpi_comm_t comm)
Wrapper around MPI_Alltoall for datatype MPI_INT.
bool cp_mpi_comms_are_similar(const cp_mpi_comm_t comm1, const cp_mpi_comm_t comm2)
Wrapper around MPI_Comm_compare.
void cp_mpi_alltoallv_byte(const void *sendbuf, const int *sendcounts, const int *sdispls, void *recvbuf, const int *recvcounts, const int *rdispls, const cp_mpi_comm_t comm)
Wrapper around MPI_Alltoallv for datatype MPI_BYTE.
int cp_mpi_sendrecv_double(const double *sendbuf, const int sendcount, const int dest, const int sendtag, double *recvbuf, const int recvcount, const int source, const int recvtag, const cp_mpi_comm_t comm)
Wrapper around MPI_Sendrecv for datatype MPI_DOUBLE.
static int imax(int x, int y)
Returns the larger of two given integers (missing from the C standard)
static int dbm_get_shard_index(const dbm_matrix_t *matrix, const int row, const int col)
Internal routine for getting a block's shard index.
static int dbm_get_num_shards(const dbm_matrix_t *matrix)
Internal routine that returns the number of shards for given matrix.
static void free_packed_matrix(dbm_packed_matrix_t *packed)
Private routine for releasing a packed matrix.
static void icumsum(const int n, const int input[n], int output[n])
Private routine for computing the cumulative sums of given numbers.
static void create_pack_plans(const bool trans_matrix, const bool trans_dist, const dbm_matrix_t *matrix, const cp_mpi_comm_t comm, const dbm_dist_1d_t *dist_indices, const dbm_dist_1d_t *dist_ticks, const int nticks, const int npacks, plan_t *plans_per_pack[npacks], int nblks_per_pack[npacks], int ndata_per_pack[npacks])
Private routine for planning packs.
static void postprocess_received_blocks(const int nranks, const int nshards, const int nblocks_recv, const int blks_recv_count[nranks], const int blks_recv_displ[nranks], const int data_recv_displ[nranks], dbm_pack_block_t blks_recv[nblocks_recv])
Private routine for post-processing received blocks.
dbm_comm_iterator_t * dbm_comm_iterator_start(const bool transa, const bool transb, const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b, const dbm_matrix_t *matrix_c)
Internal routine for creating a communication iterator.
static void fill_send_buffers(const dbm_matrix_t *matrix, const bool trans_matrix, const int nblks_send, const int ndata_send, plan_t plans[nblks_send], const int nranks, int blks_send_count[nranks], int data_send_count[nranks], int blks_send_displ[nranks], int data_send_displ[nranks], dbm_pack_block_t blks_send[nblks_send], double data_send[ndata_send])
Private routine for filling send buffers.
void dbm_comm_iterator_stop(dbm_comm_iterator_t *iter)
Internal routine for releasing the given communication iterator.
static dbm_pack_t * sendrecv_pack(const int itick, const int nticks, dbm_packed_matrix_t *packed)
Private routine for sending and receiving the pack for the given tick.
static int compare_pack_blocks_by_sum_index(const void *a, const void *b)
Private comparator passed to qsort to compare two blocks by sum_index.
bool dbm_comm_iterator_next(dbm_comm_iterator_t *iter, dbm_pack_t **pack_a, dbm_pack_t **pack_b)
Internal routine for retrieving the next pair of packs from the given iterator.
static dbm_packed_matrix_t pack_matrix(const bool trans_matrix, const bool trans_dist, const dbm_matrix_t *matrix, const dbm_distribution_t *dist, const int nticks)
Private routine for redistributing a matrix along selected dimensions.
static unsigned long long calculate_tick_index(int sum_index, int nticks)
Private routine for calculating tick indices in pack plans.
static int isum(const int n, const int input[n])
Private routine for computing the sum of the given integers.
static void const int const int i
void offload_mempool_host_free(const void *memory)
Internal routine for releasing memory back to the pool.
void * offload_mempool_host_malloc(const size_t size)
Internal routine for allocating host memory from the pool.
Internal struct for storing a block's metadata.
Internal struct for storing a communication iterator.
dbm_packed_matrix_t packed_a
dbm_distribution_t * dist
dbm_packed_matrix_t packed_b
Internal struct for storing a one dimensional distribution.
Internal struct for storing a two dimensional distribution.
Internal struct for storing a matrix.
dbm_distribution_t * dist
Internal struct for storing a dbm_block_t plus its norm.
Internal struct for storing a pack - essentially a shard for MPI.
dbm_pack_block_t * blocks
Internal struct for storing a packed matrix.
const dbm_dist_1d_t * dist_ticks
const dbm_dist_1d_t * dist_indices
Internal struct for storing a matrix shard.
Private struct used for planning during pack_matrix.