dbm_multiply.c (git:b17b328)
/*----------------------------------------------------------------------------*/
/* CP2K: A general program to perform molecular dynamics simulations         */
/* Copyright 2000-2025 CP2K developers group <https://cp2k.org>               */
/*                                                                            */
/* SPDX-License-Identifier: BSD-3-Clause                                      */
/*----------------------------------------------------------------------------*/

#include <assert.h>
#include <limits.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "../offload/offload_runtime.h"
#include "dbm_hyperparams.h"
#include "dbm_internal.h"
#include "dbm_library.h"
#include "dbm_mempool.h"
#include "dbm_multiply.h"
#include "dbm_multiply_comm.h"
#include "dbm_multiply_cpu.h"
#include "dbm_multiply_gpu.h"
/*******************************************************************************
 * \brief Private routine for computing the max filter threshold for each row.
 * \author Ole Schuett
 ******************************************************************************/
static float *compute_rows_max_eps(const bool trans, const dbm_matrix_t *matrix,
                                   const double filter_eps) {
  const int nrows = (trans) ? matrix->ncols : matrix->nrows;
  int *nblocks_per_row = calloc(nrows, sizeof(int));
  float *row_max_eps = malloc(nrows * sizeof(float));
  assert((nblocks_per_row != NULL && row_max_eps != NULL) || nrows == 0);

#pragma omp parallel
  {
#pragma omp for
    for (int ishard = 0; ishard < dbm_get_num_shards(matrix); ishard++) {
      dbm_shard_t *shard = &matrix->shards[ishard];
      for (int iblock = 0; iblock < shard->nblocks; iblock++) {
        const dbm_block_t *blk = &shard->blocks[iblock];
        const int row = (trans) ? blk->col : blk->row;
#pragma omp atomic
        ++nblocks_per_row[row];
      }
    }
#pragma omp master
    dbm_mpi_sum_int(nblocks_per_row, nrows, matrix->dist->comm);
#pragma omp barrier
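    // The per-row threshold divides filter_eps by the number of blocks in the
    // row, so that even if every skipped contribution is just below the
    // threshold, the accumulated error per row stays below filter_eps. The
    // value is stored squared because it is later compared against products
    // of (squared) block norms, see multiply_packs().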
#pragma omp for
    for (int i = 0; i < nrows; i++) {
      const float f =
          ((float)filter_eps) / ((float)imax(1, nblocks_per_row[i]));
      row_max_eps[i] = f * f;
    }
  } // end of omp parallel region

  free(nblocks_per_row);
  return row_max_eps; // Ownership of row_max_eps transfers to caller.
}

/*******************************************************************************
 * \brief Private struct for storing the context of the multiplication backend.
 * \author Ole Schuett
 ******************************************************************************/
typedef struct {
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
  dbm_multiply_gpu_context_t gpu;
#endif
  int cpu_options; // Binary or'ed dbm_multiply_cpu_options (enum).
} backend_context_t;

/*******************************************************************************
 * \brief Private routine for initializing the multiplication backend.
 * \author Ole Schuett
 ******************************************************************************/
static backend_context_t *backend_start(const dbm_matrix_t *matrix_c) {
  backend_context_t *const ctx = calloc(1, sizeof(backend_context_t));
  // BLAS and LIBXSMM benefit in general from DBM_MULTIPLY_TASK_REORDER.
  ctx->cpu_options = DBM_MULTIPLY_TASK_REORDER;

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
  dbm_multiply_gpu_start(DBM_MAX_BATCH_SIZE, dbm_get_num_shards(matrix_c),
                         matrix_c->shards, &ctx->gpu);
#else
  (void)matrix_c; // mark as used
#endif

  return ctx;
}

/*******************************************************************************
 * \brief Private routine for handing newly arrived packs to the backend.
 * \author Ole Schuett
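 * \return Whether the packs were uploaded to the GPU backend
 *         (always false without offload support).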
 ******************************************************************************/
static bool backend_upload_packs(const dbm_pack_t *pack_a,
                                 const dbm_pack_t *pack_b,
                                 backend_context_t *ctx) {
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
  return dbm_multiply_gpu_upload_packs(pack_a, pack_b, &ctx->gpu);
#else
  (void)pack_a; // mark as used
  (void)pack_b;
  (void)ctx;
  return false;
#endif
}

/*******************************************************************************
 * \brief Private routine for sending a batch to the multiplication backend.
 * \author Ole Schuett
 ******************************************************************************/
static void backend_process_batch(const int ntasks,
                                  const dbm_task_t batch[ntasks],
                                  const double alpha, const dbm_pack_t *pack_a,
                                  const dbm_pack_t *pack_b, const int kshard,
                                  dbm_shard_t *shard_c, const bool finish,
                                  const bool force_cpu,
                                  backend_context_t *ctx) {
  if (NULL != ctx) {
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
    if (!force_cpu) {
      dbm_multiply_gpu_process_batch(ntasks, batch, alpha, shard_c, kshard,
                                     finish, &ctx->gpu);
    } else
#endif
    {
      (void)kshard;
      (void)finish;
      (void)force_cpu;
      dbm_multiply_cpu_process_batch(ntasks, batch, alpha, pack_a, pack_b,
                                     shard_c, ctx->cpu_options);
    }
  } else { // Validate against host (aka CPU).
    dbm_multiply_cpu_process_batch(ntasks, batch, alpha, pack_a, pack_b,
                                   shard_c, DBM_MULTIPLY_BLAS_LIBRARY);
  }
}

/*******************************************************************************
 * \brief Private routine for shutting down the multiplication backend.
 * \author Ole Schuett
 ******************************************************************************/
static void backend_stop(backend_context_t *ctx) {
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_DBM)
  dbm_multiply_gpu_stop(&ctx->gpu);
#endif
  free(ctx);
}
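
// The backend helpers above form a simple lifecycle: backend_start() sets up
// the (optional) GPU context, backend_upload_packs() pushes incoming pack data
// to the device, backend_process_batch() executes task batches on the GPU or
// the CPU, and backend_stop() releases the context again.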

/*******************************************************************************
 * \brief Private routine for multiplying two packs.
 * \author Ole Schuett
 ******************************************************************************/
static void multiply_packs(const bool transa, const bool transb,
                           const double alpha, const dbm_pack_t *pack_a,
                           const dbm_pack_t *pack_b,
                           const dbm_matrix_t *matrix_a,
                           const dbm_matrix_t *matrix_b, dbm_matrix_t *matrix_c,
                           const float *rows_max_eps,
                           const bool retain_sparsity, const bool force_cpu,
                           int64_t *flop, backend_context_t *ctx) {
  // For validation, flops are not counted and relying on ctx is not necessary.
  backend_context_t *const context = (NULL != flop ? ctx : NULL);
  const float alpha2 = alpha * alpha;
  int64_t flop_sum = 0;

  const int nshard_rows = matrix_c->dist->rows.nshards;
  const int nshard_cols = matrix_c->dist->cols.nshards;
  int *shard_row_start = calloc(nshard_rows, sizeof(int));
  int *shard_col_start = calloc(nshard_cols, sizeof(int));
  assert(NULL != shard_row_start && NULL != shard_col_start);

  const int *sum_index_sizes_a =
      (transa) ? matrix_a->row_sizes : matrix_a->col_sizes;
  const int *sum_index_sizes_b =
      (transb) ? matrix_b->col_sizes : matrix_b->row_sizes;
  const int *free_index_sizes_a =
      (transa) ? matrix_a->col_sizes : matrix_a->row_sizes;
  const int *free_index_sizes_b =
      (transb) ? matrix_b->row_sizes : matrix_b->col_sizes;

#pragma omp parallel reduction(+ : flop_sum)
  {
    // Thread-private batch array; the work below is processed piece-wise in
    // batches of up to DBM_MAX_BATCH_SIZE tasks.
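    // (The buffer comes from the DBM host memory pool; with GPU offload
    // enabled this memory is typically pinned, which allows asynchronous
    // uploads of the batches.)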
    dbm_task_t *batch =
        dbm_mempool_host_malloc(sizeof(dbm_task_t) * DBM_MAX_BATCH_SIZE);

    // Blocks are ordered first by shard. Create lookup tables of the shard
    // boundaries.
#pragma omp for nowait
    for (int iblock = 1; iblock < pack_a->nblocks; iblock++) {
      const int shard_row = pack_a->blocks[iblock].free_index % nshard_rows;
      const int prev_shard_row =
          pack_a->blocks[iblock - 1].free_index % nshard_rows;
      if (prev_shard_row != shard_row) {
        shard_row_start[shard_row] = iblock;
      }
    }
#pragma omp for
    for (int jblock = 1; jblock < pack_b->nblocks; jblock++) {
      const int shard_col = pack_b->blocks[jblock].free_index % nshard_cols;
      const int prev_shard_col =
          pack_b->blocks[jblock - 1].free_index % nshard_cols;
      if (prev_shard_col != shard_col) {
        shard_col_start[shard_col] = jblock;
      }
    }

#pragma omp for collapse(2) DBM_OMP_SCHEDULE
    for (int shard_row = 0; shard_row < nshard_rows; shard_row++) {
      for (int shard_col = 0; shard_col < nshard_cols; shard_col++) {
        const int ishard = shard_row * nshard_cols + shard_col;
        dbm_shard_t *const shard_c = &matrix_c->shards[ishard];
        int ntasks = 0;

        // Use a merge-join to find pairs of blocks with matching sum indices.
        // This utilizes that blocks within a shard are ordered by sum_index.
        const int iblock_start = shard_row_start[shard_row];
        int jblock_start = shard_col_start[shard_col];
        for (int iblock = iblock_start; iblock < pack_a->nblocks; iblock++) {
          const dbm_pack_block_t *blk_a = &pack_a->blocks[iblock];
          if (blk_a->free_index % nshard_rows != shard_row) {
            break;
          }
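          // The inner loop walks pack_b in step with pack_a: when blk_b's
          // sum_index is still smaller than blk_a's, jblock_start is advanced
          // permanently, because pack_a's sum indices only grow within this
          // shard row and such a blk_b can never match again.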
          for (int jblock = jblock_start; jblock < pack_b->nblocks; jblock++) {
            const dbm_pack_block_t *blk_b = &pack_b->blocks[jblock];
            if (blk_b->free_index % nshard_cols != shard_col) {
              jblock = pack_b->nblocks; // break
              continue;
            }
            if (blk_a->sum_index < blk_b->sum_index) {
              jblock = pack_b->nblocks; // break
              continue;
            }
            if (blk_a->sum_index > blk_b->sum_index) {
              jblock_start++;
              continue;
            }
            // Found block pair with blk_a->sum_index == blk_b->sum_index.

            // Check norms.
            const float result_norm = alpha2 * blk_a->norm * blk_b->norm;
            if (result_norm < rows_max_eps[blk_a->free_index]) {
              continue;
            }

            // Check block sizes.
            const int m = free_index_sizes_a[blk_a->free_index];
            const int n = free_index_sizes_b[blk_b->free_index];
            const int k = sum_index_sizes_a[blk_a->sum_index];
            assert(m == matrix_c->row_sizes[blk_a->free_index]);
            assert(n == matrix_c->col_sizes[blk_b->free_index]);
            assert(k == sum_index_sizes_b[blk_b->sum_index]);

            // Get C block.
            const int row = blk_a->free_index, col = blk_b->free_index;
            dbm_block_t *blk_c = dbm_shard_lookup(shard_c, row, col);
            if (blk_c == NULL && retain_sparsity) {
              continue;
            } else if (blk_c == NULL) {
              assert(dbm_get_shard_index(matrix_c, row, col) == ishard);
              assert(dbm_get_stored_coordinates(matrix_c, row, col) ==
                     matrix_c->dist->my_rank);
              blk_c = dbm_shard_promise_new_block(shard_c, row, col, m * n);
            }

            // Count flops.
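            // (2 * m * n * k: each of the m * n result elements needs k
            // multiplications and k additions.)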
            const int64_t task_flops = 2LL * m * n * k;
            if (task_flops == 0) {
              continue;
            }
            flop_sum += task_flops;
            dbm_library_counter_increment(m, n, k);

            // Add block multiplication to batch.
            batch[ntasks].m = m;
            batch[ntasks].n = n;
            batch[ntasks].k = k;
            batch[ntasks].offset_a = blk_a->offset;
            batch[ntasks].offset_b = blk_b->offset;
            batch[ntasks].offset_c = blk_c->offset;
            ++ntasks;

            if (ntasks == DBM_MAX_BATCH_SIZE) {
              backend_process_batch(ntasks, batch, alpha, pack_a, pack_b,
                                    ishard, shard_c, false, force_cpu, context);
              ntasks = 0;
            }
          }
        }
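        // Flush the remaining tasks of this shard; finish == true marks this
        // as the shard's final batch for the backend.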
        backend_process_batch(ntasks, batch, alpha, pack_a, pack_b, ishard,
                              shard_c, true, force_cpu, context);
      }
    }

    dbm_mempool_host_free(batch);
  }

  free(shard_row_start);
  free(shard_col_start);

  if (NULL != flop) {
    *flop += flop_sum;
  }
}

/*******************************************************************************
 * \brief Performs a multiplication of two dbm_matrix_t matrices.
 *        See dbm_matrix.h for details.
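 *
 *        Example (sketch): compute matrix_c := 1.0 * matrix_a * matrix_b,
 *        filter the result at 1e-10, and retrieve the flop count:
 *
 *          int64_t flop = 0;
 *          dbm_multiply(false, false, 1.0, matrix_a, matrix_b, 0.0, matrix_c,
 *                       false, 1e-10, &flop);
 *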
 * \author Ole Schuett
 ******************************************************************************/
void dbm_multiply(const bool transa, const bool transb, const double alpha,
                  const dbm_matrix_t *matrix_a, const dbm_matrix_t *matrix_b,
                  const double beta, dbm_matrix_t *matrix_c,
                  const bool retain_sparsity, const double filter_eps,
                  int64_t *flop) {
  assert(omp_get_num_threads() == 1);
  assert(matrix_a != NULL && matrix_b != NULL && matrix_c != NULL);

  // Throughout the matrix multiplication code, "sum_index" and "free_index"
  // denote the summation (aka dummy) and free indices from Einstein notation.
  const int num_sum_index_a = (transa) ? matrix_a->nrows : matrix_a->ncols;
  const int num_sum_index_b = (transb) ? matrix_b->ncols : matrix_b->nrows;
  const int num_free_index_a = (transa) ? matrix_a->ncols : matrix_a->nrows;
  const int num_free_index_b = (transb) ? matrix_b->nrows : matrix_b->ncols;

  // Sanity check matrix dimensions.
  assert(num_sum_index_a == num_sum_index_b);
  assert(num_free_index_a == matrix_c->nrows);
  assert(num_free_index_b == matrix_c->ncols);

  // Prepare matrix_c (host).
  dbm_scale(matrix_c, beta);

  // Determine if validation shall be performed.
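  // Setting DBM_MULTIPLY_VERIFY (or just DBM_MULTIPLY_MAXEPS) recomputes the
  // product on the CPU into a shadow matrix and compares it against matrix_c:
  // a difference above maxeps is reported as a warning for verify == 1 and
  // aborts the run for any other non-zero verify value.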
  const char *const maxeps_env = getenv("DBM_MULTIPLY_MAXEPS");
  const char *const verify_env = getenv("DBM_MULTIPLY_VERIFY");
  const double maxeps = (NULL == maxeps_env ? 1E-1 : fabs(atof(maxeps_env)));
  const int verify =
      (NULL == verify_env ? (NULL == maxeps_env ? 0 : 1) : atoi(verify_env));
  dbm_matrix_t *matrix_d = NULL;
  if (0 != verify) {
    dbm_distribution_t *const dist_shared = matrix_c->dist;
    dbm_create(&matrix_d, dist_shared, matrix_c->name, matrix_c->nrows,
               matrix_c->ncols, matrix_c->row_sizes, matrix_c->col_sizes);
    dbm_copy(matrix_d, matrix_c);
  }

  // Compute filter thresholds for each row.
  float *rows_max_eps = compute_rows_max_eps(transa, matrix_a, filter_eps);

  // Start uploading matrix_c to the GPU.
  backend_context_t *ctx = backend_start(matrix_c);

  // Redistribute matrix_a and matrix_b across MPI ranks.
  dbm_comm_iterator_t *iter =
      dbm_comm_iterator_start(transa, transb, matrix_a, matrix_b, matrix_c);

  // Count flops if requested.
  if (NULL != flop) {
    *flop = 0;
  }

  // Main loop.
  dbm_pack_t *pack_a, *pack_b;
  while (dbm_comm_iterator_next(iter, &pack_a, &pack_b)) {
    const bool uploaded = backend_upload_packs(pack_a, pack_b, ctx);
    (void)uploaded; // mark used
    multiply_packs(transa, transb, alpha, pack_a, pack_b, matrix_a, matrix_b,
                   matrix_c, rows_max_eps, retain_sparsity, false /*!uploaded*/,
                   flop, ctx);
  }

  // Wait for all other MPI ranks to complete, then release resources.
  dbm_comm_iterator_stop(iter);
  backend_stop(ctx);

  if (NULL != matrix_d) {
    iter =
        dbm_comm_iterator_start(transa, transb, matrix_a, matrix_b, matrix_d);
    while (dbm_comm_iterator_next(iter, &pack_a, &pack_b)) {
      multiply_packs(transa, transb, alpha, pack_a, pack_b, matrix_a, matrix_b,
                     matrix_d, rows_max_eps, retain_sparsity, true, NULL, ctx);
    }
    dbm_comm_iterator_stop(iter);
    const double epsilon = dbm_maxeps(matrix_d, matrix_c);
    if (maxeps < epsilon) {
      if (1 == verify) {
        fprintf(stderr, "WARN ACC/LIBDBM: diff=%g\n", epsilon);
      } else {
        fprintf(stderr, "ERROR ACC/LIBDBM: diff=%g\n", epsilon);
        exit(EXIT_FAILURE);
      }
    }
    dbm_release(matrix_d);
  }

  // Release filter thresholds.
  free(rows_max_eps);

  // Final filter pass.
  dbm_filter(matrix_c, filter_eps);
}

// EOF