16   #include "../offload/offload_runtime.h"
31                                       const double filter_eps) {
32     const int nrows = (trans) ? matrix->ncols : matrix->nrows;
33     int *nblocks_per_row = calloc(nrows, sizeof(int));
34     float *row_max_eps = malloc(nrows * sizeof(float));
35     assert((nblocks_per_row != NULL && row_max_eps != NULL) || nrows == 0);
42     for (int iblock = 0; iblock < shard->nblocks; iblock++) {
44       const int row = (trans) ? blk->col : blk->row;
46       ++nblocks_per_row[row];
53     for (int i = 0; i < nrows; i++) {
55           ((float)filter_eps) / ((float)imax(1, nblocks_per_row[i]));
56       row_max_eps[i] = f * f;
60     free(nblocks_per_row);
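The excerpt above derives a per-row filter threshold: filter_eps is split evenly over the blocks of a row and the quotient is squared before it is stored. A hedged restatement outside the listing, assuming imax is the small helper documented further below; the helper name row_threshold is introduced here only for illustration:

  /* Sketch only, not the verbatim routine: each row gets a budget of
   * filter_eps divided by its block count, and compute_rows_max_eps()
   * stores the square of that budget in row_max_eps[]. */
  static float row_threshold(const double filter_eps, const int nblocks_in_row) {
    const float f = ((float)filter_eps) / ((float)imax(1, nblocks_in_row));
    return f * f; /* later compared against alpha^2 * norm_a * norm_b */
  }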
69   #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
70     dbm_multiply_gpu_context_t gpu;
84   #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
86                              matrix_c->shards, &ctx->gpu);
101  #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
102    return dbm_multiply_gpu_upload_packs(pack_a, pack_b, &ctx->gpu);
120                                    const bool force_cpu,
123  #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
125      dbm_multiply_gpu_process_batch(ntasks, batch, alpha, shard_c, kshard,
147  #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
148    dbm_multiply_gpu_stop(&ctx->gpu);
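The backend_* excerpts above all share one guard pattern: GPU entry points are compiled only when __OFFLOAD is defined and __NO_OFFLOAD_DBM is not. The sketch below illustrates how backend_process_batch could dispatch under that guard; the if (!force_cpu) branch, the trailing &ctx->gpu argument, and the options value 0 are assumptions made for illustration, while dbm_multiply_cpu_process_batch follows its documented signature in the reference list below:

  /* Sketch only: GPU path when offloading is compiled in and force_cpu is
   * not requested, otherwise the CPU kernel processes the batch. */
  #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
    if (!force_cpu) {
      dbm_multiply_gpu_process_batch(ntasks, batch, alpha, shard_c, kshard,
                                     &ctx->gpu); /* assumed final argument */
    } else
  #endif
    {
      dbm_multiply_cpu_process_batch(ntasks, batch, alpha, pack_a, pack_b,
                                     shard_c, /*options=*/0); /* assumed value */
    }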
162                             const float *rows_max_eps,
163                             const bool retain_sparsity, const bool force_cpu,
167    const float alpha2 = alpha * alpha;
168    int64_t flop_sum = 0;
172    int *shard_row_start = calloc(nshard_rows, sizeof(int));
173    int *shard_col_start = calloc(nshard_cols, sizeof(int));
174    assert(NULL != shard_row_start && NULL != shard_col_start);
176    const int *sum_index_sizes_a =
178    const int *sum_index_sizes_b =
180    const int *free_index_sizes_a =
182    const int *free_index_sizes_b =
185  #pragma omp parallel reduction(+ : flop_sum)
192  #pragma omp for nowait
193      for (int iblock = 1; iblock < pack_a->nblocks; iblock++) {
195        const int prev_shard_row =
197        if (prev_shard_row != shard_row) {
198          shard_row_start[shard_row] = iblock;
202      for (int jblock = 1; jblock < pack_b->nblocks; jblock++) {
204        const int prev_shard_col =
206        if (prev_shard_col != shard_col) {
207          shard_col_start[shard_col] = jblock;
211  #pragma omp for collapse(2) DBM_OMP_SCHEDULE
212      for (int shard_row = 0; shard_row < nshard_rows; shard_row++) {
213        for (int shard_col = 0; shard_col < nshard_cols; shard_col++) {
214          const int ishard = shard_row * nshard_cols + shard_col;
220          const int iblock_start = shard_row_start[shard_row];
221          int jblock_start = shard_col_start[shard_col];
222          for (int iblock = iblock_start; iblock < pack_a->nblocks; iblock++) {
224            if (blk_a->free_index % nshard_rows != shard_row) {
227            for (int jblock = jblock_start; jblock < pack_b->nblocks; jblock++) {
229              if (blk_b->free_index % nshard_cols != shard_col) {
244              const float result_norm = alpha2 * blk_a->norm * blk_b->norm;
245              if (result_norm < rows_max_eps[blk_a->free_index]) {
250              const int m = free_index_sizes_a[blk_a->free_index];
251              const int n = free_index_sizes_b[blk_b->free_index];
252              const int k = sum_index_sizes_a[blk_a->sum_index];
255              assert(k == sum_index_sizes_b[blk_b->sum_index]);
260              if (blk_c == NULL && retain_sparsity) {
262              } else if (blk_c == NULL) {
264                assert(dbm_get_stored_coordinates(matrix_c, row, col) ==
270              const int64_t task_flops = 2LL * m * n * k;
271              if (task_flops == 0) {
274              flop_sum += task_flops;
288                                    ishard, shard_c, /*finish=*/false,
                                       force_cpu, context);
294                                shard_c, /*finish=*/true, force_cpu, context);
301    free(shard_row_start);
302    free(shard_col_start);
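Two details of the shard loop above are worth restating. First, the norm filter: a block pair is skipped when alpha2 times the product of the packed block norms falls below the per-row threshold computed earlier. Second, the cost estimate task_flops = 2 * m * n * k counts one multiplication and one addition per element of the m x n x k block product. A hedged helper restating the filter test; the function name task_passes_filter is hypothetical, while the field names norm and free_index come from the excerpt:

  /* Sketch only: keep the task when it can contribute more than the
   * per-row threshold stored by compute_rows_max_eps(). */
  static bool task_passes_filter(const float alpha2,
                                 const dbm_pack_block_t *blk_a,
                                 const dbm_pack_block_t *blk_b,
                                 const float *rows_max_eps) {
    return alpha2 * blk_a->norm * blk_b->norm >= rows_max_eps[blk_a->free_index];
  }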
314  void dbm_multiply(const bool transa, const bool transb, const double alpha,
317                    const bool retain_sparsity, const double filter_eps,
319    assert(omp_get_num_threads() == 1);
320    assert(matrix_a != NULL && matrix_b != NULL && matrix_c != NULL);
324    const int num_sum_index_a = (transa) ? matrix_a->nrows : matrix_a->ncols;
325    const int num_sum_index_b = (transb) ? matrix_b->ncols : matrix_b->nrows;
326    const int num_free_index_a = (transa) ? matrix_a->ncols : matrix_a->nrows;
327    const int num_free_index_b = (transb) ? matrix_b->nrows : matrix_b->ncols;
330    assert(num_sum_index_a == num_sum_index_b);
331    assert(num_free_index_a == matrix_c->nrows);
332    assert(num_free_index_b == matrix_c->ncols);
335    dbm_scale(matrix_c, beta);
338    const char *const maxeps_env = getenv("DBM_MULTIPLY_MAXEPS");
339    const char *const verify_env = getenv("DBM_MULTIPLY_VERIFY");
340    const double maxeps = (NULL == maxeps_env ? 1E-1 : fabs(atof(maxeps_env)));
342        (NULL == verify_env ? (NULL == maxeps_env ? 0 : 1) : atoi(verify_env));
346      dbm_create(&matrix_d, dist_shared, matrix_c->name, matrix_c->nrows,
348      dbm_copy(matrix_d, matrix_c);
371    multiply_packs(transa, transb, alpha, pack_a, pack_b, matrix_a, matrix_b,
372                   matrix_c, rows_max_eps, retain_sparsity, /*force_cpu=*/false,
380    if (NULL != matrix_d) {
384      multiply_packs(transa, transb, alpha, pack_a, pack_b, matrix_a, matrix_b,
385                     matrix_d, rows_max_eps, retain_sparsity, /*force_cpu=*/true,
                        NULL, ctx);
388      const double epsilon = dbm_maxeps(matrix_d, matrix_c);
389      if (maxeps < epsilon) {
391          fprintf(stderr, "WARN ACC/LIBDBM: diff=%g\n", epsilon);
393          fprintf(stderr, "ERROR ACC/LIBDBM: diff=%g\n", epsilon);
397      dbm_release(matrix_d);
404    dbm_filter(matrix_c, filter_eps);
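The tail of dbm_multiply() wires an optional self-check to two environment variables: DBM_MULTIPLY_VERIFY switches the check on, and DBM_MULTIPLY_MAXEPS (default 1E-1) sets the tolerance for the dbm_maxeps comparison between the regular result and a CPU-only reference product held in matrix_d. A usage sketch from the caller's side, assuming POSIX setenv and eliding the actual dbm_multiply argument list; the wrapper name enable_dbm_verification is hypothetical:

  #include <stdlib.h>

  /* Sketch only: request the CPU cross-check for subsequent multiplications
   * and tighten the default 1E-1 tolerance. */
  static void enable_dbm_verification(void) {
    setenv("DBM_MULTIPLY_VERIFY", "1", 1);
    setenv("DBM_MULTIPLY_MAXEPS", "1e-6", 1);
    /* ... then call dbm_multiply(...) as usual ... */
  }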
#define DBM_MAX_BATCH_SIZE
static int imax(int x, int y)
Returns the larger of two given integers (missing from the C standard); a likely one-line definition is sketched after this reference list.
void dbm_library_counter_increment(const int m, const int n, const int k)
Add given block multiplication to stats. This routine is thread-safe.
double dbm_maxeps(const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b)
Calculates maximum relative difference between matrix_a and matrix_b.
static int dbm_get_shard_index(const dbm_matrix_t *matrix, const int row, const int col)
Internal routine for getting a block's shard index.
static int dbm_get_num_shards(const dbm_matrix_t *matrix)
Internal routine that returns the number of shards for given matrix.
void * dbm_mempool_host_malloc(const size_t size)
Internal routine for allocating host memory from the pool.
void dbm_mempool_host_free(const void *memory)
Internal routine for releasing memory back to the pool.
void dbm_mpi_sum_int(int *values, const int count, const dbm_mpi_comm_t comm)
Wrapper around MPI_Allreduce for op MPI_SUM and datatype MPI_INT.
static float * compute_rows_max_eps(const bool trans, const dbm_matrix_t *matrix, const double filter_eps)
Private routine for computing the max filter threshold for each row.
static backend_context_t * backend_start(const dbm_matrix_t *matrix_c)
Private routine for initializing the multiplication backend.
static bool backend_upload_packs(const dbm_pack_t *pack_a, const dbm_pack_t *pack_b, backend_context_t *ctx)
Private routine for handing newly arrived packs to the backend.
static void backend_stop(backend_context_t *ctx)
Private routine for shutting down the multiplication backend.
static void multiply_packs(const bool transa, const bool transb, const double alpha, const dbm_pack_t *pack_a, const dbm_pack_t *pack_b, const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b, dbm_matrix_t *matrix_c, const float *rows_max_eps, const bool retain_sparsity, const bool force_cpu, int64_t *flop, backend_context_t *ctx)
Private routine for multiplying two packs.
static void backend_process_batch(const int ntasks, const dbm_task_t batch[ntasks], const double alpha, const dbm_pack_t *pack_a, const dbm_pack_t *pack_b, const int kshard, dbm_shard_t *shard_c, const bool finish, const bool force_cpu, backend_context_t *ctx)
Private routine for sending a batch to the multiplication backend.
dbm_comm_iterator_t * dbm_comm_iterator_start(const bool transa, const bool transb, const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b, const dbm_matrix_t *matrix_c)
Internal routine for creating a communication iterator.
void dbm_comm_iterator_stop(dbm_comm_iterator_t *iter)
Internal routine for releasing the given communication iterator.
bool dbm_comm_iterator_next(dbm_comm_iterator_t *iter, dbm_pack_t **pack_a, dbm_pack_t **pack_b)
Internal routine for retrieving next pair of packs from given iterator.
void dbm_multiply_cpu_process_batch(int ntasks, const dbm_task_t batch[ntasks], double alpha, const dbm_pack_t *pack_a, const dbm_pack_t *pack_b, dbm_shard_t *shard_c, int options)
Internal routine for executing the tasks in given batch on the CPU.
@ DBM_MULTIPLY_BLAS_LIBRARY
@ DBM_MULTIPLY_TASK_REORDER
dbm_block_t * dbm_shard_promise_new_block(dbm_shard_t *shard, const int row, const int col, const int block_size)
Internal routine for allocating the metadata of a new block.
dbm_block_t * dbm_shard_lookup(const dbm_shard_t *shard, const int row, const int col)
Internal routine for looking up a block from a shard.
backend_context_t
Private struct for storing the context of the multiplication backend.
dbm_block_t
Internal struct for storing a block's metadata.
dbm_comm_iterator_t
Internal struct for storing a communication iterator.
dbm_distribution_t
Internal struct for storing a two dimensional distribution.
dbm_matrix_t
Internal struct for storing a matrix.
dbm_distribution_t * dist
dbm_pack_block_t
Internal struct for storing a dbm_block_t plus its norm.
dbm_pack_t
Internal struct for storing a pack - essentially a shard for MPI.
dbm_pack_block_t * blocks
dbm_shard_t
Internal struct for storing a matrix shard.
dbm_task_t
Internal struct for storing a task, i.e. a single block multiplication.
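As noted for imax above, the helper is not part of the C standard; a likely one-line definition, consistent with its use in compute_rows_max_eps but stated here as an assumption rather than a copy of the source:

  /* Sketch only: integer maximum, used when dividing filter_eps by a
   * row's block count to avoid division by zero. */
  static int imax(int x, int y) { return (x > y) ? x : y; }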