// Returns the greatest common divisor of two integers (Euclid's algorithm).
static int gcd(const int a, const int b) {
  if (a == 0)
    return b;
  return gcd(b % a, a);
}

// Returns the least common multiple of two integers.
static int lcm(const int a, const int b) { return (a * b) / gcd(a, b); }
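A quick stand-alone check of the two helpers above, with illustrative values only:

#include <assert.h>

int main(void) {
  assert(gcd(12, 18) == 6); // Euclid's algorithm
  assert(lcm(12, 18) == 36);
  return 0;
}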
// Private routine for computing the sum of the given integers.
static inline int isum(const int n, const int input[n]) {
  int sum = 0;
  for (int i = 0; i < n; i++) {
    sum += input[i];
  }
  return sum;
}
// Private routine for computing the cumulative sums (exclusive prefix sums)
// of the given numbers.
static inline void icumsum(const int n, const int input[n], int output[n]) {
  output[0] = 0;
  for (int i = 1; i < n; i++) {
    output[i] = output[i - 1] + input[i - 1];
  }
}
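Together these two helpers turn per-rank counts into the count/displacement pairs expected by the alltoallv wrappers used further down. A stand-alone sketch with made-up counts (it assumes isum and icumsum from above are in scope):

#include <stdio.h>

int main(void) {
  const int nranks = 4;
  const int counts[4] = {3, 0, 5, 2}; // e.g. blocks destined for each rank
  int displs[4];
  icumsum(nranks, counts, displs);        // displs = {0, 3, 3, 8}
  const int total = isum(nranks, counts); // total  = 10
  for (int i = 0; i < nranks; i++) {
    printf("rank %d: count=%d displ=%d\n", i, counts[i], displs[i]);
  }
  // The invariant checked by the asserts in fill_send_buffers below:
  printf("total %d == displs[n-1] + counts[n-1] == %d\n", total,
         displs[nranks - 1] + counts[nranks - 1]);
  return 0;
}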
// Private routine for planning packs. The two passes below run inside an
// OpenMP parallel region, so each thread keeps its own counters before the
// merge steps.
static void create_pack_plans(const bool trans_matrix, const bool trans_dist,
                              const dbm_matrix_t *matrix,
                              const dbm_mpi_comm_t comm,
                              const dbm_dist_1d_t *dist_indices,
                              const dbm_dist_1d_t *dist_ticks, const int nticks,
                              const int npacks, plan_t *plans_per_pack[npacks],
                              int nblks_per_pack[npacks],
                              int ndata_per_pack[npacks]) {

  memset(nblks_per_pack, 0, npacks * sizeof(int));
  memset(ndata_per_pack, 0, npacks * sizeof(int));

  // 1st pass: count how many blocks this thread contributes to each pack.
  int nblks_mythread[npacks];
  memset(nblks_mythread, 0, npacks * sizeof(int));
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < shard->nblocks; iblock++) {
    // ... (shard and blk refer to the current shard and block of the matrix) ...
    const int sum_index = (trans_matrix) ? blk->row : blk->col;
    const int itick = (1021 * sum_index) % nticks; // 1021 is prime
    const int ipack = itick / dist_ticks->nranks;
    nblks_mythread[ipack]++;
  }

  // Merge the thread-local counts and remember each thread's write position.
  for (int ipack = 0; ipack < npacks; ipack++) {
    nblks_per_pack[ipack] += nblks_mythread[ipack];
    nblks_mythread[ipack] = nblks_per_pack[ipack];
  }

  // Allocate the plan arrays.
  for (int ipack = 0; ipack < npacks; ipack++) {
    plans_per_pack[ipack] = malloc(nblks_per_pack[ipack] * sizeof(plan_t));
    assert(plans_per_pack[ipack] != NULL);
  }

  // 2nd pass: count the data volume and fill the plans, walking each thread's
  // reserved range backwards via --nblks_mythread.
  int ndata_mythread[npacks];
  memset(ndata_mythread, 0, npacks * sizeof(int));
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < shard->nblocks; iblock++) {
    const int free_index = (trans_matrix) ? blk->col : blk->row;
    const int sum_index = (trans_matrix) ? blk->row : blk->col;
    const int itick = (1021 * sum_index) % nticks;
    const int ipack = itick / dist_ticks->nranks;
    // Compute the destination rank from the process-grid coordinates.
    const int coord_free_idx = dist_indices->index2coord[free_index];
    const int coord_sum_idx = itick % dist_ticks->nranks;
    const int coords[2] = {(trans_dist) ? coord_sum_idx : coord_free_idx,
                           (trans_dist) ? coord_free_idx : coord_sum_idx};
    const int rank = dbm_mpi_cart_rank(comm, coords);
    // ... (row_size and col_size are the block's dimensions) ...
    ndata_mythread[ipack] += row_size * col_size;
    const int iplan = --nblks_mythread[ipack];
    plans_per_pack[ipack][iplan].blk = blk;
    plans_per_pack[ipack][iplan].rank = rank;
    plans_per_pack[ipack][iplan].row_size = row_size;
    plans_per_pack[ipack][iplan].col_size = col_size;
  }

  for (int ipack = 0; ipack < npacks; ipack++) {
    ndata_per_pack[ipack] += ndata_mythread[ipack];
  }
}
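Both passes above use the same counting-then-filling idiom that recurs throughout this file: count items per bucket, turn the counts into end positions, then place each item at a decremented position. A stand-alone sketch, with made-up items and buckets standing in for blocks and packs:

#include <stdio.h>

int main(void) {
  const int nitems = 8, nbuckets = 3;
  const int bucket_of[8] = {2, 0, 1, 2, 0, 0, 1, 2};
  int sorted[8];

  // 1st pass: count items per bucket.
  int count[3] = {0, 0, 0};
  for (int i = 0; i < nitems; i++)
    count[bucket_of[i]]++;

  // Turn the counts into end positions (the role of nblks_mythread above).
  int pos[3], running = 0;
  for (int b = 0; b < nbuckets; b++) {
    running += count[b];
    pos[b] = running; // one past the last slot of bucket b
  }

  // 2nd pass: place each item at --pos[bucket], exactly like the plan fill.
  for (int i = 0; i < nitems; i++)
    sorted[--pos[bucket_of[i]]] = i;

  for (int i = 0; i < nitems; i++)
    printf("%d ", sorted[i]); // prints the items grouped by bucket
  printf("\n");
  return 0;
}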
// Private routine for filling send buffers.
static void fill_send_buffers(
    const dbm_matrix_t *matrix, const bool trans_matrix, const int nblks_send,
    const int ndata_send, plan_t plans[nblks_send], const int nranks,
    int blks_send_count[nranks], int data_send_count[nranks],
    int blks_send_displ[nranks], int data_send_displ[nranks],
    dbm_pack_block_t blks_send[nblks_send], double data_send[ndata_send]) {

  memset(blks_send_count, 0, nranks * sizeof(int));
  memset(data_send_count, 0, nranks * sizeof(int));

  // 1st pass: count blocks and data volume per destination rank.
  int nblks_mythread[nranks], ndata_mythread[nranks];
  memset(nblks_mythread, 0, nranks * sizeof(int));
  memset(ndata_mythread, 0, nranks * sizeof(int));
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblks_send; iblock++) {
    const plan_t *plan = &plans[iblock];
    nblks_mythread[plan->rank] += 1;
    // ... (ndata_mythread is incremented by the block's size here) ...
  }

  // Merge the thread-local counts and remember each thread's write position.
  for (int irank = 0; irank < nranks; irank++) {
    blks_send_count[irank] += nblks_mythread[irank];
    data_send_count[irank] += ndata_mythread[irank];
    nblks_mythread[irank] = blks_send_count[irank];
    ndata_mythread[irank] = data_send_count[irank];
  }

  // Compute displacements and check consistency with the planned totals.
  icumsum(nranks, blks_send_count, blks_send_displ);
  icumsum(nranks, data_send_count, data_send_displ);
  const int m = nranks - 1;
  assert(nblks_send == blks_send_displ[m] + blks_send_count[m]);
  assert(ndata_send == data_send_displ[m] + data_send_count[m]);

  // 2nd pass: copy the block data into the send buffers, walking each
  // thread's reserved range backwards.
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblks_send; iblock++) {
    const plan_t *plan = &plans[iblock];
    // ... (shard, blk, row_size and col_size are looked up here) ...
    const double *blk_data = &shard->data[blk->offset];
    const int plan_size = row_size * col_size;
    const int irank = plan->rank;
    nblks_mythread[irank] -= 1;
    ndata_mythread[irank] -= plan_size;
    const int offset = data_send_displ[irank] + ndata_mythread[irank];
    const int jblock = blks_send_displ[irank] + nblks_mythread[irank];
    double norm = 0.0;
    if (trans_matrix) {
      // Transpose the block while copying it into the send buffer.
      for (int i = 0; i < row_size; i++) {
        for (int j = 0; j < col_size; j++) {
          const double element = blk_data[j * row_size + i];
          data_send[offset + i * col_size + j] = element;
          norm += element * element;
        }
      }
      blks_send[jblock].free_index = plan->blk->col;
      blks_send[jblock].sum_index = plan->blk->row;
    } else {
      // Copy the block as-is.
      for (int i = 0; i < plan_size; i++) {
        const double element = blk_data[i];
        data_send[offset + i] = element;
        norm += element * element;
      }
      blks_send[jblock].free_index = plan->blk->row;
      blks_send[jblock].sum_index = plan->blk->col;
    }
    blks_send[jblock].norm = (float)norm;
    // Offsets are stored relative to the rank's portion of the send buffer.
    blks_send[jblock].offset = offset - data_send_displ[irank];
  }
}
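The transposing branch above converts a column-major row_size x col_size block into row-major order in the send buffer while accumulating its squared norm. A small stand-alone illustration of that index arithmetic (sizes and values are made up):

#include <stdio.h>

int main(void) {
  const int row_size = 2, col_size = 3;
  // Column-major storage: element (i,j) lives at j * row_size + i.
  const double blk_data[6] = {1.0, 2.0,   // column 0
                              3.0, 4.0,   // column 1
                              5.0, 6.0};  // column 2
  double out[6];
  double norm = 0.0;
  for (int i = 0; i < row_size; i++) {
    for (int j = 0; j < col_size; j++) {
      const double element = blk_data[j * row_size + i];
      out[i * col_size + j] = element; // row-major destination
      norm += element * element;
    }
  }
  // out = {1, 3, 5, 2, 4, 6}, i.e. the block's rows laid out contiguously.
  for (int k = 0; k < 6; k++)
    printf("%.0f ", out[k]);
  printf("\nsquared norm = %.0f\n", norm); // 91
  return 0;
}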
// Private routine for post-processing received blocks.
static void postprocess_received_blocks(
    const int nranks, const int nshards, const int nblocks_recv,
    const int blks_recv_count[nranks], const int blks_recv_displ[nranks],
    const int data_recv_displ[nranks],
    dbm_pack_block_t blks_recv[nblocks_recv]) {

  int nblocks_per_shard[nshards], shard_start[nshards];
  memset(nblocks_per_shard, 0, nshards * sizeof(int));

  // ... (a temporary copy blocks_tmp of the received blocks is allocated here) ...
  assert(blocks_tmp != NULL);

  // Turn the per-rank data offsets into global offsets.
  for (int irank = 0; irank < nranks; irank++) {
    for (int i = 0; i < blks_recv_count[irank]; i++) {
      blks_recv[blks_recv_displ[irank] + i].offset += data_recv_displ[irank];
    }
  }

  // 1st pass: count the received blocks per target shard.
  int nblocks_mythread[nshards];
  memset(nblocks_mythread, 0, nshards * sizeof(int));
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblocks_recv; iblock++) {
    blocks_tmp[iblock] = blks_recv[iblock];
    const int ishard = blks_recv[iblock].free_index % nshards;
    nblocks_mythread[ishard]++;
  }

  // Merge the thread-local counts and remember each thread's write position.
  for (int ishard = 0; ishard < nshards; ishard++) {
    nblocks_per_shard[ishard] += nblocks_mythread[ishard];
    nblocks_mythread[ishard] = nblocks_per_shard[ishard];
  }
  icumsum(nshards, nblocks_per_shard, shard_start);

  // 2nd pass: group the blocks by shard, walking each thread's range backwards.
#pragma omp for schedule(static)
  for (int iblock = 0; iblock < nblocks_recv; iblock++) {
    const int ishard = blocks_tmp[iblock].free_index % nshards;
    const int jblock = --nblocks_mythread[ishard] + shard_start[ishard];
    blks_recv[jblock] = blocks_tmp[iblock];
  }

  // Sort each shard's blocks by their sum_index.
  for (int ishard = 0; ishard < nshards; ishard++) {
    if (nblocks_per_shard[ishard] > 1) {
      qsort(&blks_recv[shard_start[ishard]], nblocks_per_shard[ishard],
            sizeof(dbm_pack_block_t), &compare_pack_blocks_by_sum_index);
    }
  }
  // ... (blocks_tmp is freed here) ...
}
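The comparator handed to qsort is documented further below. A minimal sketch consistent with that description; only the sum_index field comes from the listing, the body itself is assumed:

// Hedged sketch of a qsort comparator ordering blocks by their sum_index;
// the actual implementation in dbm_multiply_comm.c may differ.
static int compare_pack_blocks_by_sum_index(const void *a, const void *b) {
  const dbm_pack_block_t *blk_a = (const dbm_pack_block_t *)a;
  const dbm_pack_block_t *blk_b = (const dbm_pack_block_t *)b;
  // Comparing instead of subtracting avoids overflow for large indices.
  return (blk_a->sum_index > blk_b->sum_index) -
         (blk_a->sum_index < blk_b->sum_index);
}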
// Private routine for redistributing a matrix along selected dimensions.
static dbm_packed_matrix_t pack_matrix(const bool trans_matrix,
                                       const bool trans_dist,
                                       const dbm_matrix_t *matrix,
                                       const dbm_distribution_t *dist,
                                       const int nticks) {

  // Select the 1d distributions of the kept (free) and summed (tick) dims.
  const dbm_dist_1d_t *dist_indices = (trans_dist) ? &dist->cols : &dist->rows;
  const dbm_dist_1d_t *dist_ticks = (trans_dist) ? &dist->rows : &dist->cols;

  // Each rank sends nsend_packs packs over the course of one multiplication.
  const int nsend_packs = nticks / dist_ticks->nranks;
  assert(nsend_packs * dist_ticks->nranks == nticks);

  // Plan all packs upfront.
  plan_t *plans_per_pack[nsend_packs];
  int nblks_send_per_pack[nsend_packs], ndata_send_per_pack[nsend_packs];
  create_pack_plans(trans_matrix, trans_dist, matrix, dist->comm, dist_indices,
                    dist_ticks, nticks, nsend_packs, plans_per_pack,
                    nblks_send_per_pack, ndata_send_per_pack);

  // Size the send buffers for the largest pack.
  int nblks_send_max = 0, ndata_send_max = 0;
  for (int ipack = 0; ipack < nsend_packs; ++ipack) {
    nblks_send_max = imax(nblks_send_max, nblks_send_per_pack[ipack]);
    ndata_send_max = imax(ndata_send_max, ndata_send_per_pack[ipack]);
  }
  // ... (the send buffers blks_send and data_send are allocated here) ...

  for (int ipack = 0; ipack < nsend_packs; ipack++) {
    // Fill the send buffers according to this pack's plan.
    const int nranks = dist->nranks;
    int blks_send_count[nranks], data_send_count[nranks];
    int blks_send_displ[nranks], data_send_displ[nranks];
    fill_send_buffers(matrix, trans_matrix, nblks_send_per_pack[ipack],
                      ndata_send_per_pack[ipack], plans_per_pack[ipack], nranks,
                      blks_send_count, data_send_count, blks_send_displ,
                      data_send_displ, blks_send, data_send);
    free(plans_per_pack[ipack]);

    // Exchange the block counts and then the block metadata (as raw bytes).
    int blks_recv_count[nranks], blks_recv_displ[nranks];
    dbm_mpi_alltoall_int(blks_send_count, 1, blks_recv_count, 1, dist->comm);
    icumsum(nranks, blks_recv_count, blks_recv_displ);
    const int nblocks_recv = isum(nranks, blks_recv_count);
    // ... (blks_recv is allocated here to hold nblocks_recv entries) ...

    int blks_send_count_byte[nranks], blks_send_displ_byte[nranks];
    int blks_recv_count_byte[nranks], blks_recv_displ_byte[nranks];
    for (int i = 0; i < nranks; i++) {
      blks_send_count_byte[i] = blks_send_count[i] * sizeof(dbm_pack_block_t);
      blks_send_displ_byte[i] = blks_send_displ[i] * sizeof(dbm_pack_block_t);
      blks_recv_count_byte[i] = blks_recv_count[i] * sizeof(dbm_pack_block_t);
      blks_recv_displ_byte[i] = blks_recv_displ[i] * sizeof(dbm_pack_block_t);
    }
    dbm_mpi_alltoallv_byte(blks_send, blks_send_count_byte,
                           blks_send_displ_byte, blks_recv,
                           blks_recv_count_byte, blks_recv_displ_byte,
                           dist->comm);

    // Exchange the block data.
    int data_recv_count[nranks], data_recv_displ[nranks];
    dbm_mpi_alltoall_int(data_send_count, 1, data_recv_count, 1, dist->comm);
    icumsum(nranks, data_recv_count, data_recv_displ);
    const int ndata_recv = isum(nranks, data_recv_count);
    // ... (data_recv is allocated here to hold ndata_recv doubles) ...
    dbm_mpi_alltoallv_double(data_send, data_send_count, data_send_displ,
                             data_recv, data_recv_count, data_recv_displ,
                             dist->comm);

    // Group the received blocks by shard (nshards is the matrix's shard
    // count) and sort them by sum_index.
    postprocess_received_blocks(nranks, nshards, nblocks_recv, blks_recv_count,
                                blks_recv_displ, data_recv_displ, blks_recv);
    // ... (the received pack is stored in the returned dbm_packed_matrix_t) ...
  }

  // Determine the largest pack, so reusable receive buffers can be allocated.
  int max_nblocks = 0, max_data_size = 0;
  for (int ipack = 0; ipack < packed.nsend_packs; ipack++) {
    // ... (max_nblocks and max_data_size are updated from the stored packs) ...
  }
  // ... (the receive buffers are allocated and the packed matrix is returned) ...
}
// Inside sendrecv_pack(): determine which pack this rank sends during the
// given tick and to whom, and which pack it receives and from whom.
  const int itick_of_rank0 = (itick + nticks - my_rank) % nticks;
  const int send_rank = (my_rank + nticks - itick_of_rank0) % nranks;
  const int send_itick = (itick_of_rank0 + send_rank) % nticks;
  const int send_ipack = send_itick / nranks;
  assert(send_itick % nranks == my_rank); // A rank only sends its own packs.

  const int recv_rank = itick % nranks;
  const int recv_ipack = itick / nranks;

  if (send_rank == my_rank) { // Nothing to communicate with oneself.
    assert(send_rank == recv_rank && send_ipack == recv_ipack);
  }
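A worked example of this tick arithmetic with made-up sizes, 4 ranks and 8 ticks, so each rank owns two send packs:

#include <assert.h>
#include <stdio.h>

int main(void) {
  const int nranks = 4, nticks = 8, my_rank = 1, itick = 6;
  const int itick_of_rank0 = (itick + nticks - my_rank) % nticks;    // 5
  const int send_rank = (my_rank + nticks - itick_of_rank0) % nranks; // 0
  const int send_itick = (itick_of_rank0 + send_rank) % nticks;       // 5
  assert(send_itick % nranks == my_rank); // the pack sent is one of ours
  const int send_ipack = send_itick / nranks; // 1
  const int recv_rank = itick % nranks;       // 2
  const int recv_ipack = itick / nranks;      // 1
  printf("tick %d: rank %d sends pack %d to rank %d and receives pack %d "
         "from rank %d\n",
         itick, my_rank, send_ipack, send_rank, recv_ipack, recv_rank);
  return 0;
}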
// From free_packed_matrix(): release each stored send pack.
  for (int ipack = 0; ipack < packed->nsend_packs; ipack++) {
    // ... (the pack's buffers are freed here) ...
  }

// From dbm_comm_iterator_start():
  assert(iter != NULL);

// From dbm_comm_iterator_next(): shift the current tick before picking packs.
  const int shifted_itick = (iter->itick + shift) % iter->nticks;
static int imax(int x, int y)
Returns the larger of two given integers (missing from the C standard).
static int dbm_get_shard_index(const dbm_matrix_t *matrix, const int row, const int col)
Internal routine for getting a block's shard index.
static int dbm_get_num_shards(const dbm_matrix_t *matrix)
Internal routine that returns the number of shards for given matrix.
void dbm_mempool_host_free(const void *memory)
Internal routine for releasing memory back to the pool.
void * dbm_mempool_host_malloc(size_t size)
Internal routine for allocating host memory from the pool.
int dbm_mpi_sendrecv_byte(const void *sendbuf, const int sendcount, const int dest, const int sendtag, void *recvbuf, const int recvcount, const int source, const int recvtag, const dbm_mpi_comm_t comm)
Wrapper around MPI_Sendrecv for datatype MPI_BYTE.
void dbm_mpi_free_mem(void *mem)
Wrapper around MPI_Free_mem.
void * dbm_mpi_alloc_mem(size_t size)
Wrapper around MPI_Alloc_mem.
int dbm_mpi_cart_rank(const dbm_mpi_comm_t comm, const int coords[])
Wrapper around MPI_Cart_rank.
bool dbm_mpi_comms_are_similar(const dbm_mpi_comm_t comm1, const dbm_mpi_comm_t comm2)
Wrapper around MPI_Comm_compare.
void dbm_mpi_alltoallv_byte(const void *sendbuf, const int *sendcounts, const int *sdispls, void *recvbuf, const int *recvcounts, const int *rdispls, const dbm_mpi_comm_t comm)
Wrapper around MPI_Alltoallv for datatype MPI_BYTE.
void dbm_mpi_alltoall_int(const int *sendbuf, const int sendcount, int *recvbuf, const int recvcount, const dbm_mpi_comm_t comm)
Wrapper around MPI_Alltoall for datatype MPI_INT.
int dbm_mpi_sendrecv_double(const double *sendbuf, const int sendcount, const int dest, const int sendtag, double *recvbuf, const int recvcount, const int source, const int recvtag, const dbm_mpi_comm_t comm)
Wrapper around MPI_Sendrecv for datatype MPI_DOUBLE.
void dbm_mpi_max_int(int *values, const int count, const dbm_mpi_comm_t comm)
Wrapper around MPI_Allreduce for op MPI_MAX and datatype MPI_INT.
void dbm_mpi_alltoallv_double(const double *sendbuf, const int *sendcounts, const int *sdispls, double *recvbuf, const int *recvcounts, const int *rdispls, const dbm_mpi_comm_t comm)
Wrapper around MPI_Alltoallv for datatype MPI_DOUBLE.
static void create_pack_plans(const bool trans_matrix, const bool trans_dist, const dbm_matrix_t *matrix, const dbm_mpi_comm_t comm, const dbm_dist_1d_t *dist_indices, const dbm_dist_1d_t *dist_ticks, const int nticks, const int npacks, plan_t *plans_per_pack[npacks], int nblks_per_pack[npacks], int ndata_per_pack[npacks])
Private routine for planning packs.
static void free_packed_matrix(dbm_packed_matrix_t *packed)
Private routine for releasing a packed matrix.
static void icumsum(const int n, const int input[n], int output[n])
Private routine for computing the cumulative sums of the given numbers.
static void postprocess_received_blocks(const int nranks, const int nshards, const int nblocks_recv, const int blks_recv_count[nranks], const int blks_recv_displ[nranks], const int data_recv_displ[nranks], dbm_pack_block_t blks_recv[nblocks_recv])
Private routine for post-processing received blocks.
dbm_comm_iterator_t * dbm_comm_iterator_start(const bool transa, const bool transb, const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b, const dbm_matrix_t *matrix_c)
Internal routine for creating a communication iterator.
static void fill_send_buffers(const dbm_matrix_t *matrix, const bool trans_matrix, const int nblks_send, const int ndata_send, plan_t plans[nblks_send], const int nranks, int blks_send_count[nranks], int data_send_count[nranks], int blks_send_displ[nranks], int data_send_displ[nranks], dbm_pack_block_t blks_send[nblks_send], double data_send[ndata_send])
Private routine for filling send buffers.
void dbm_comm_iterator_stop(dbm_comm_iterator_t *iter)
Internal routine for releasing the given communication iterator.
static dbm_pack_t * sendrecv_pack(const int itick, const int nticks, dbm_packed_matrix_t *packed)
Private routine for sending and receiving the pack for the given tick.
static int compare_pack_blocks_by_sum_index(const void *a, const void *b)
Private comparator passed to qsort to compare two blocks by sum_index.
bool dbm_comm_iterator_next(dbm_comm_iterator_t *iter, dbm_pack_t **pack_a, dbm_pack_t **pack_b)
Internal routine for retrieving the next pair of packs from the given iterator (see the usage sketch below).
static dbm_packed_matrix_t pack_matrix(const bool trans_matrix, const bool trans_dist, const dbm_matrix_t *matrix, const dbm_distribution_t *dist, const int nticks)
Private routine for redistributing a matrix along selected dimensions.
static int isum(const int n, const int input[n])
Private routine for computing the sum of the given integers.
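dbm_comm_iterator_start, dbm_comm_iterator_next and dbm_comm_iterator_stop above form a start/next/stop loop. A hedged usage sketch, assuming the caller (presumably the multiplication driver) already holds the matrices and transpose flags:

// Hedged usage sketch: transa, transb and matrix_a/b/c come from the caller.
dbm_comm_iterator_t *iter =
    dbm_comm_iterator_start(transa, transb, matrix_a, matrix_b, matrix_c);
dbm_pack_t *pack_a = NULL, *pack_b = NULL;
while (dbm_comm_iterator_next(iter, &pack_a, &pack_b)) {
  // Multiply the blocks of pack_a with those of pack_b into matrix_c here.
}
dbm_comm_iterator_stop(iter);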
Internal struct for storing a block's metadata.
Internal struct for storing a communication iterator.
    dbm_packed_matrix_t packed_a
    dbm_distribution_t * dist
    dbm_packed_matrix_t packed_b
Internal struct for storing a one-dimensional distribution.
Internal struct for storing a two-dimensional distribution.
Internal struct for storing a matrix.
    dbm_distribution_t * dist
Internal struct for storing a dbm_block_t plus its norm.
Internal struct for storing a pack - essentially a shard for MPI.
    dbm_pack_block_t * blocks
Internal struct for storing a packed matrix.
    const dbm_dist_1d_t * dist_ticks
    const dbm_dist_1d_t * dist_indices
Internal struct for storing a matrix shard.
Private struct used for planning during pack_matrix.
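The field accesses in the code above imply the rough shape of two of these structs. A hedged sketch, with field order and exact types assumed rather than taken from the header:

// Hedged sketch of plan_t and dbm_pack_block_t as implied by the accesses
// above (.blk, .rank, .row_size, .col_size and .free_index, .sum_index,
// .norm, .offset); the real definitions may differ in order and typing.
typedef struct {
  const dbm_block_t *blk; // block to be sent
  int rank;               // destination MPI rank
  int row_size;           // block dimensions
  int col_size;
} plan_t;

typedef struct {
  int free_index; // index that is kept local after packing
  int sum_index;  // index that is summed over during multiplication
  float norm;     // squared norm of the block's data
  int offset;     // offset of the block's data within the pack's data buffer
} dbm_pack_block_t;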