dbm_miniapp.c
/*----------------------------------------------------------------------------*/
/* CP2K: A general program to perform molecular dynamics simulations */
/* Copyright 2000-2025 CP2K developers group <https://cp2k.org> */
/* */
/* SPDX-License-Identifier: BSD-3-Clause */
/*----------------------------------------------------------------------------*/

#include <assert.h>
#include <omp.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#if defined(__LIBXSMM)
#include <libxsmm.h>
#endif

#include "../offload/offload_library.h"
#include "dbm_library.h"
#include "dbm_matrix.h"
#include "dbm_mpi.h"

/*******************************************************************************
 * \brief Wrapper for printf, passed to dbm_library_print_stats.
 * \author Ole Schuett
 ******************************************************************************/
static void print_func(char *message, int output_unit) {
  if (output_unit == 0) { // i.e. my_rank == 0
    printf("%s", message);
  }
}

/*******************************************************************************
 * \brief Returns the smaller of the two integers (missing from the C standard).
 * \author Ole Schuett
 ******************************************************************************/
static inline int imin(int x, int y) { return (x < y ? x : y); }

/*******************************************************************************
 * \brief Private routine for creating a distribution.
 * \author Ole Schuett
 ******************************************************************************/
static dbm_distribution_t *create_dist(const int nrows, const int ncols,
                                       const dbm_mpi_comm_t comm) {
  int cart_dims[2], cart_periods[2], cart_coords[2];
  dbm_mpi_cart_get(comm, 2, cart_dims, cart_periods, cart_coords);

  // Create distribution.
  int *row_dist = malloc(nrows * sizeof(int));
  int *col_dist = malloc(ncols * sizeof(int));
  assert(row_dist != NULL && col_dist != NULL);
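  // Block rows and columns are assigned round-robin to the 2D process grid,
  // e.g. cart_dims = {2, 3} and nrows = 5 yields row_dist = {0, 1, 0, 1, 0}.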
  for (int i = 0; i < nrows; i++) {
    row_dist[i] = i % cart_dims[0];
  }
  for (int i = 0; i < ncols; i++) {
    col_dist[i] = i % cart_dims[1];
  }
  const int fortran_comm = dbm_mpi_comm_c2f(comm);
  dbm_distribution_t *dist = NULL;
  dbm_distribution_new(&dist, fortran_comm, nrows, ncols, row_dist, col_dist);
  free(row_dist);
  free(col_dist);
  return dist;
}

/*******************************************************************************
 * \brief Private routine for creating a distribution and an empty matrix.
 * \author Ole Schuett
 ******************************************************************************/
static dbm_matrix_t *
create_some_matrix(const int nrows, const int ncols, const int nrows_min,
                   const int nrows_max, const int ncols_min,
                   const int ncols_max, const dbm_mpi_comm_t comm) {
  // Create distribution.
  dbm_distribution_t *dist = create_dist(nrows, ncols, comm);

  // Create matrix.
  int *row_sizes = malloc(nrows * sizeof(int));
  int *col_sizes = malloc(ncols * sizeof(int));
  assert(row_sizes != NULL && col_sizes != NULL);
  assert(0 < nrows_min && nrows_min <= nrows_max);
  assert(0 < ncols_min && ncols_min <= ncols_max);
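  // Block sizes are either fixed (min == max) or drawn randomly from
  // [1, max - min + 1]; the callers in benchmark_multiply pass min == 1
  // whenever they request random sizes.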
  if (nrows_min != nrows_max) {
    const int row_size = nrows_max - nrows_min + 1;
    for (int i = 0; i < nrows; i++) {
      row_sizes[i] = rand() % row_size + 1;
    }
  } else {
    for (int i = 0; i < nrows; i++) {
      row_sizes[i] = nrows_max;
    }
  }
  if (ncols_min != ncols_max) {
    const int col_size = ncols_max - ncols_min + 1;
    for (int i = 0; i < ncols; i++) {
      col_sizes[i] = rand() % col_size + 1;
    }
  } else {
    for (int i = 0; i < ncols; i++) {
      col_sizes[i] = ncols_max;
    }
  }
  dbm_matrix_t *matrix = NULL;
  dbm_create(&matrix, dist, "some name", nrows, ncols, row_sizes, col_sizes);
  dbm_distribution_release(dist);
  free(row_sizes);
  free(col_sizes);
  return matrix;
}

/*******************************************************************************
 * \brief Private routine for reserving all blocks of the given matrix.
 * \author Ole Schuett
 ******************************************************************************/
static void reserve_all_blocks(dbm_matrix_t *matrix) {
  int nrows, ncols;
  const int *row_sizes, *col_sizes;
  dbm_get_row_sizes(matrix, &nrows, &row_sizes);
  dbm_get_col_sizes(matrix, &ncols, &col_sizes);

#pragma omp parallel
  {
    int nblocks = 0;
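    // First pass: each thread counts the locally stored blocks in its share of
    // the collapsed loop (dbm_get_stored_coordinates returns the owning rank).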
#pragma omp for collapse(2)
    for (int row = 0; row < nrows; row++) {
      for (int col = 0; col < ncols; col++) {
        if (dbm_get_stored_coordinates(matrix, row, col) ==
            matrix->dist->my_rank) {
          ++nblocks;
        }
      }
    }
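    // Second pass: record the coordinates of those blocks and reserve them.
    // Both passes use the same worksharing loop, so each thread is expected to
    // revisit exactly the blocks it counted above (checked by the assert).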
    int *reserve_row = malloc(nblocks * sizeof(int));
    int *reserve_col = malloc(nblocks * sizeof(int));
    assert(reserve_row != NULL && reserve_col != NULL);
    int iblock = 0;
#pragma omp for collapse(2)
    for (int row = 0; row < nrows; row++) {
      for (int col = 0; col < ncols; col++) {
        if (dbm_get_stored_coordinates(matrix, row, col) ==
            matrix->dist->my_rank) {
          reserve_row[iblock] = row;
          reserve_col[iblock] = col;
          iblock++;
        }
      }
    }
    assert(iblock == nblocks);
    dbm_reserve_blocks(matrix, nblocks, reserve_row, reserve_col);
    free(reserve_row);
    free(reserve_col);
  }
}

/*******************************************************************************
 * \brief Private routine for setting all blocks.
 * \author Ole Schuett
 ******************************************************************************/
static void set_all_blocks(dbm_matrix_t *matrix) {
#pragma omp parallel
  {
    dbm_iterator_t *iter = NULL;
    dbm_iterator_start(&iter, matrix);
    while (dbm_iterator_blocks_left(iter)) {
      int row, col, row_size, col_size;
      double *block;
      dbm_iterator_next_block(iter, &row, &col, &block, &row_size, &col_size);
      const int block_size = row_size * col_size;
      for (int i = 0; i < block_size; i++) {
#if defined(DBM_VALIDATE_AGAINST_LIBXSMM) && defined(__LIBXSMM)
        block[i] = 1.0 / (i + 1);
#else
        block[i] = 1.0;
#endif
      }
    }
    dbm_iterator_stop(iter);
  }
}

/*******************************************************************************
 * \brief Run a benchmark of dbm_multiply with given block sizes.
 * \author Ole Schuett
 ******************************************************************************/
void benchmark_multiply(const int M, const int N, const int K, const int m,
                        const int n, const int k, const dbm_mpi_comm_t comm) {
#if defined(DBM_VALIDATE_AGAINST_LIBXSMM) && defined(__LIBXSMM)
  dbm_matrix_t *matrix_a = create_some_matrix(M, K, 1, m, k, k, comm);
  dbm_matrix_t *matrix_b = create_some_matrix(K, N, k, k, 1, n, comm);
#else
  dbm_matrix_t *matrix_a = create_some_matrix(M, K, m, m, k, k, comm);
  dbm_matrix_t *matrix_b = create_some_matrix(K, N, k, k, n, n, comm);
#endif
  dbm_distribution_t *dist_c = create_dist(M, N, comm);
  dbm_matrix_t *matrix_c = NULL;
  dbm_create(&matrix_c, dist_c, "result", M, N, matrix_a->row_sizes,
             matrix_b->col_sizes);
  dbm_distribution_release(dist_c);

  reserve_all_blocks(matrix_a);
  reserve_all_blocks(matrix_b);
  set_all_blocks(matrix_a);
  set_all_blocks(matrix_b);

  int64_t flop = 0;
  const double time_start_multiply = omp_get_wtime();
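  // Benchmarked call: C += A * B with alpha = beta = 1.0 and no transposes;
  // the trailing arguments are understood to disable sparsity retention, set a
  // 1e-8 filter threshold, and return the local flop count.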
  dbm_multiply(false, false, 1.0, matrix_a, matrix_b, 1.0, matrix_c, false,
               1e-8, &flop);
  const double time_end_multiply = omp_get_wtime();

  // Validate checksum.
  // Since all matrix elements were set to 1.0 the checksum is an integer.
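  // With all inputs equal to 1.0, every element of C is a dot product of
  // K * k ones, i.e. K * k. The expected value below corresponds to summing
  // the squares of the (M * m) x (N * n) result elements, which is what
  // dbm_checksum is assumed to accumulate.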
#if defined(DBM_VALIDATE_AGAINST_LIBXSMM) && defined(__LIBXSMM)
  const double expected = 0, checksum = 0;
#else
  const double expected = (uint64_t)M * m * N * n * K * K * k * k;
  const double checksum = dbm_checksum(matrix_c);
#endif

  dbm_release(matrix_a);
  dbm_release(matrix_b);
  dbm_release(matrix_c);

  if (dbm_mpi_comm_rank(comm) == 0) {
    printf("%5i x %5i x %5i with %3i x %3i x %3i blocks: ", M, N, K, m, n, k);
  }
  if (checksum == expected) {
    dbm_mpi_sum_int64(&flop, 1, comm);
    if (dbm_mpi_comm_rank(comm) == 0) {
      const double duration = time_end_multiply - time_start_multiply;
      printf("%6.3f s => %6.1f GFLOP/s\n", duration, 1e-9 * flop / duration);
      fflush(stdout);
    }
  } else {
    printf("ERROR\n");
    fprintf(stderr, "Expected checksum %f but got %f.\n", expected, checksum);
    exit(1);
  }
}

/*******************************************************************************
 * \brief Stand-alone miniapp for smoke-testing and benchmarking dbm_multiply.
 * \author Ole Schuett
 ******************************************************************************/
int main(int argc, char *argv[]) {
  int result = EXIT_SUCCESS;

  srand(25071975); // seed rng
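  // Same seed on every rank: all ranks call rand() in the same order, so the
  // randomly chosen block sizes stay consistent across the MPI ranks.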

  dbm_mpi_init(&argc, &argv);
  dbm_library_init();

  const dbm_mpi_comm_t world_comm = dbm_mpi_get_comm_world();
  const int nranks = dbm_mpi_comm_size(world_comm);
  const int my_rank = dbm_mpi_comm_rank(world_comm);

  if (offload_get_device_count() > 0) {
    offload_set_chosen_device(my_rank % offload_get_device_count());
  }

  // Create 2D cart.
  int dims[2] = {0, 0};
  dbm_mpi_dims_create(nranks, 2, dims);
  const int periods[2] = {true, true};
  dbm_mpi_comm_t comm =
      dbm_mpi_cart_create(world_comm, 2, dims, periods, false);

  if (my_rank == 0) {
    printf("OpenMP-threads: %i GPUs: %i", omp_get_max_threads(),
           imin(offload_get_device_count(), nranks));
#if defined(__LIBXSMM)
    printf(" Libxsmm: %s", LIBXSMM_VERSION);
#else
    printf(" Libxsmm: n/a");
#endif
#if defined(__parallel)
    printf(" MPI-ranks: %i MPI-cart: %i x %i", nranks, dims[0], dims[1]);
#else
    printf(" MPI: n/a");
#endif
    printf("\n\n");
    fflush(stdout);
  }

  if (1 >= argc) {
    benchmark_multiply(16384, 128, 128, 4, 4, 4, comm);
    benchmark_multiply(128, 16384, 128, 4, 4, 4, comm);
    benchmark_multiply(128, 128, 16384, 4, 4, 4, comm);
    benchmark_multiply(645, 645, 645, 4, 4, 4, comm);
    if (my_rank == 0)
      printf("\n");

    benchmark_multiply(60, 500, 500, 128, 4, 4, comm);
    benchmark_multiply(500, 60, 500, 4, 128, 4, comm);
    benchmark_multiply(500, 500, 60, 4, 4, 128, comm);
    if (my_rank == 0)
      printf("\n");

    benchmark_multiply(500, 60, 60, 4, 128, 128, comm);
    benchmark_multiply(60, 500, 60, 128, 4, 128, comm);
    benchmark_multiply(60, 60, 500, 128, 128, 4, comm);
    if (my_rank == 0)
      printf("\n");

    benchmark_multiply(350, 350, 350, 23, 23, 23, comm);
    benchmark_multiply(250, 250, 250, 32, 32, 32, comm);
    benchmark_multiply(60, 60, 60, 128, 128, 128, comm);
  } else { /* read triplet(s) from file or one triplet from command line */
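    /* Examples of accepted inputs (binary name is illustrative; the exact
     * semantics follow from the parsing below):
     *   dbm_miniapp.x 23                  -> 23x23x23 blocks, 128x128x128 of them
     *   dbm_miniapp.x 4x4x4x500x500x500   -> 4x4x4 blocks, 500x500x500 of them
     *   dbm_miniapp.x triplets.txt        -> one "m [n k [M N K]]" line per run
     */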
    FILE *const file = fopen(argv[1], "r"); /* try 1st arg as filename */
    char buffer[1024];
    const char delims[] = "x,;:|/\t ";
    int mnk[] = {0, 0, 0}, i = 1, j = 0;
    while (i < argc &&
           (NULL == file || NULL != fgets(buffer, sizeof(buffer), file))) {
      const char *arg = strtok(NULL != file ? buffer : argv[i], delims);
      for (; NULL != arg && j < 3; arg = strtok(NULL, delims), ++j) {
        mnk[j] = atoi(arg);
      }
      if (NULL != file) {
        j = 0;
      } else if (++i < argc) {
        continue;
      }
      if (0 < mnk[0]) { /* valid MxNxK? */
        const int m = mnk[0];
        const int n = (0 < mnk[1] ? mnk[1] : m);
        const int k = (0 < mnk[2] ? mnk[2] : m);
        int M = (NULL == arg ? 0 : atoi(arg)), N, K;
        if (0 < M) {
          arg = strtok(NULL, delims);
          N = (NULL == arg ? 1 : atoi(arg));
          arg = strtok(NULL, delims);
          K = (NULL == arg ? 1 : atoi(arg));
        } else { /* default */
          M = N = K = 128;
        }
        benchmark_multiply(M, N, K, m, n, k, comm);
        mnk[0] = mnk[1] = mnk[2] = 0;
      } else {
        fprintf(stderr, "ERROR: invalid argument(s)\n");
        result = EXIT_FAILURE;
        i = argc; /* break */
      }
    }
    if (NULL != file) {
      fclose(file);
    }
  }

  if (EXIT_SUCCESS == result) {
    dbm_library_print_stats(dbm_mpi_comm_c2f(comm), &print_func, my_rank);
  }
  dbm_library_finalize();
  dbm_mpi_comm_free(&comm);
  dbm_mpi_finalize();
  return result;
}

359// EOF