grid_dgemm_context.c (CP2K, git:d43dca4) — dgemm grid backend context management.
1/*----------------------------------------------------------------------------*/
2/* CP2K: A general program to perform molecular dynamics simulations */
3/* Copyright 2000-2026 CP2K developers group <https://cp2k.org> */
4/* */
5/* SPDX-License-Identifier: BSD-3-Clause */
6/*----------------------------------------------------------------------------*/
7
8#include <math.h>
9#include <omp.h>
10#include <stdio.h>
11#include <stdlib.h>
12#include <string.h>
13
14#include "../common/grid_library.h"
17#include "grid_dgemm_context.h"
21#include "grid_dgemm_utils.h"
22
23void return_dh(void *const ptr, const int level, double *const dh) {
24 grid_context *const ctx = (grid_context *)ptr;
25
26 assert(ctx->checksum == ctx_checksum);
27 dh[0] = ctx->grid[level].dh[0][0];
28 dh[1] = ctx->grid[level].dh[0][1];
29 dh[2] = ctx->grid[level].dh[0][2];
30 dh[3] = ctx->grid[level].dh[1][0];
31 dh[4] = ctx->grid[level].dh[1][1];
32 dh[5] = ctx->grid[level].dh[1][2];
33 dh[6] = ctx->grid[level].dh[2][0];
34 dh[7] = ctx->grid[level].dh[2][1];
35 dh[8] = ctx->grid[level].dh[2][2];
36}
37
38void return_dh_inv(void *const ptr, const int level, double *const dh_inv) {
39 grid_context *const ctx = (grid_context *)ptr;
40
41 assert(ctx->checksum == ctx_checksum);
42 dh_inv[0] = ctx->grid[level].dh_inv[0][0];
43 dh_inv[1] = ctx->grid[level].dh_inv[0][1];
44 dh_inv[2] = ctx->grid[level].dh_inv[0][2];
45 dh_inv[3] = ctx->grid[level].dh_inv[1][0];
46 dh_inv[4] = ctx->grid[level].dh_inv[1][1];
47 dh_inv[5] = ctx->grid[level].dh_inv[1][2];
48 dh_inv[6] = ctx->grid[level].dh_inv[2][0];
49 dh_inv[7] = ctx->grid[level].dh_inv[2][1];
50 dh_inv[8] = ctx->grid[level].dh_inv[2][2];
51}
52
53int return_num_devs(void *const ptr) {
54 grid_context *const ctx = (grid_context *)ptr;
55 assert(ctx->checksum == ctx_checksum);
56
57 return ctx->number_of_devices;
58}
59
60int return_device_id(void *const ptr, const int device) {
61 grid_context *const ctx = (grid_context *)ptr;
62 assert(ctx->checksum == ctx_checksum);
63
64 return ctx->device_id[device];
65}
66
67int is_grid_orthorhombic(void *const ptr) {
68 grid_context *const ctx = (grid_context *)ptr;
69 assert(ctx->checksum == ctx_checksum);
70 return ctx->orthorhombic;
71}
72
73void update_queue_length(void *const ptr, const int queue_length) {
74 grid_context *const ctx = (grid_context *)ptr;
75 assert(ctx->checksum == ctx_checksum);
76 ctx->queue_length = queue_length;
77}
78
79void update_atoms_position(const int natoms,
80 const double atoms_positions[natoms][3],
81 grid_context *data) {
82 assert(data != NULL);
83
84 if (natoms == 0)
85 return;
86
87 if (data->atom_positions == NULL) {
88 data->atom_positions = malloc(3 * natoms * sizeof(double));
89 } else {
90 if (natoms > data->natoms) {
91 data->atom_positions =
92 realloc(data->atom_positions, 3 * natoms * sizeof(double));
93 }
94 }
95 assert(data->atom_positions != NULL);
96
97 data->natoms = natoms;
98
99 if (data->atom_positions) {
100 for (int i = 0; i < natoms; i++) {
101 data->atom_positions[3 * i] = atoms_positions[i][0];
102 data->atom_positions[3 * i + 1] = atoms_positions[i][1];
103 data->atom_positions[3 * i + 2] = atoms_positions[i][2];
104 }
105 }
106}
107
/*******************************************************************************
 * \brief Stores the kind index of each atom in the context, converting from
 *        Fortran 1-based indices to 0-based C indices.
 *        data->atom_kinds[i] gives the kind of atom i.
 ******************************************************************************/
void update_atoms_kinds(const int natoms, const int *atoms_kinds,
                        grid_context *data) {
  assert(data != NULL);

  // data->atom_kinds is a table that give the type of a given atom.
  if (natoms == 0)
    return;

  if (data->atom_kinds == NULL) {
    data->atom_kinds = malloc(natoms * sizeof(int));
  } else {
    // NOTE(review): this guard compares against data->natoms, which
    // update_atoms_position may already have set to the new natoms before
    // this function runs — in that case the realloc is skipped. Confirm the
    // intended call order guarantees the existing buffer is large enough.
    if ((natoms > data->natoms) && (data->natoms > 0)) {
      data->atom_kinds = realloc(data->atom_kinds, natoms * sizeof(int));
    }
  }
  assert(data->atom_kinds != NULL);
  // data->natoms is initialized before calling this function
  if (data->natoms)
    memcpy(data->atom_kinds, atoms_kinds, sizeof(int) * natoms);

  // Shift from 1-based (Fortran) to 0-based (C) kind indices.
  for (int i = 0; i < natoms; i++) {
    data->atom_kinds[i] -= 1;
  }
}
132
133void update_block_offsets(const int nblocks, const int *const block_offsets,
134 grid_context *data) {
135 assert(data != NULL);
136
137 if (nblocks == 0)
138 return;
139
140 if (data->block_offsets == NULL) {
141 data->block_offsets = malloc(nblocks * sizeof(int));
142 } else {
143 if ((nblocks > data->nblocks_total) && (data->nblocks_total > 0)) {
144 data->block_offsets = realloc(data->block_offsets, sizeof(int) * nblocks);
145 }
146 }
147 assert(data->block_offsets != NULL);
148
149 data->nblocks = nblocks;
150 data->nblocks_total = imax(data->nblocks_total, nblocks);
151 if (nblocks)
152 memcpy(data->block_offsets, block_offsets, nblocks * sizeof(int));
153}
154
155void update_basis_set(const int nkinds, const grid_basis_set **const basis_sets,
156 grid_context *data) {
157 if (nkinds > data->nkinds_total) {
158 if (data->basis_sets == NULL) {
159 data->basis_sets = malloc(nkinds * sizeof(grid_basis_set *));
160 } else {
161 data->basis_sets =
162 realloc(data->basis_sets, nkinds * sizeof(grid_basis_set *));
163 }
164 }
165 assert(data->basis_sets != NULL);
166 data->nkinds = nkinds;
167 data->nkinds_total = imax(data->nkinds_total, nkinds);
168 memcpy(data->basis_sets, basis_sets, nkinds * sizeof(grid_basis_set *));
169}
170
171void update_task_lists(const int nlevels, const int ntasks,
172 const int *const level_list, const int *const iatom_list,
173 const int *const jatom_list, const int *const iset_list,
174 const int *const jset_list, const int *const ipgf_list,
175 const int *const jpgf_list,
176 const int *const border_mask_list,
177 const int *block_num_list,
178 const double *const radius_list,
179 const double rab_list[ntasks][3], grid_context *ctx) {
180
181 assert(ctx->checksum == ctx_checksum);
182
183 if (nlevels == 0)
184 return;
185
186 if (ctx->ntasks == 0) {
187 // Count tasks per level.
188 size_t size = nlevels * sizeof(int);
189 ctx->tasks_per_level = malloc(size);
190 ctx->tasks = malloc(nlevels * sizeof(_task *));
191 /* memset(ctx->tasks, 0, nlevels * sizeof(_task *)); */
192 if (ntasks)
193 ctx->tasks[0] = malloc(ntasks * sizeof(_task));
194 else
195 ctx->tasks[0] = NULL;
196 } else {
197 if (ctx->nlevels_total < nlevels) {
198 /* save the address of the full task list. NULL when completly empty */
199 ctx->tasks = realloc(ctx->tasks, nlevels * sizeof(_task *));
200 assert(ctx->tasks != NULL);
201 }
202 if (ctx->ntasks_total < ntasks) {
203 ctx->tasks[0] = realloc(ctx->tasks[0], ntasks * sizeof(_task));
204 assert(ctx->tasks[0] != NULL);
205 }
206 }
207
208 memset(ctx->tasks_per_level, 0, nlevels * sizeof(int));
209 ctx->nlevels = nlevels;
210 ctx->nlevels_total = imax(ctx->nlevels_total, nlevels);
211 ctx->ntasks_total = imax(ctx->ntasks_total, ntasks);
212 ctx->ntasks = ntasks;
213
214 for (int i = 0; i < ntasks; i++) {
215 ctx->tasks_per_level[level_list[i] - 1]++;
216 assert(i == 0 || level_list[i] >= level_list[i - 1]); // expect ordered list
217 }
218
219 for (int i = 1; i < ctx->nlevels; i++) {
220 ctx->tasks[i] = ctx->tasks[i - 1] + ctx->tasks_per_level[i - 1];
221 }
222
223 _task *const tasks = ctx->tasks[0];
224#pragma omp parallel for schedule(static) if (ntasks > GRID_OMP_MIN_ITERATIONS)
225 for (int i = 0; i < ntasks; i++) {
226 _task *const task = &tasks[i];
227 task->level = level_list[i] - 1;
228 task->iatom = iatom_list[i] - 1;
229 task->jatom = jatom_list[i] - 1;
230 task->iset = iset_list[i] - 1;
231 task->jset = jset_list[i] - 1;
232 task->ipgf = ipgf_list[i] - 1;
233 task->jpgf = jpgf_list[i] - 1;
234 task->border_mask = border_mask_list[i];
235 task->block_num = block_num_list[i] - 1;
236 task->radius = radius_list[i];
237 task->rab[0] = rab_list[i][0];
238 task->rab[1] = rab_list[i][1];
239 task->rab[2] = rab_list[i][2];
240 const int iatom = task->iatom;
241 const int jatom = task->jatom;
242 const int iset = task->iset;
243 const int jset = task->jset;
244 const int ipgf = task->ipgf;
245 const int jpgf = task->jpgf;
246 const int ikind = ctx->atom_kinds[iatom];
247 const int jkind = ctx->atom_kinds[jatom];
248 const grid_basis_set *ibasis = ctx->basis_sets[ikind];
249 const grid_basis_set *jbasis = ctx->basis_sets[jkind];
250 const int ncoseta = ncoset(ibasis->lmax[iset]);
251 const int ncosetb = ncoset(jbasis->lmax[jset]);
252
253 task->zeta[0] = ibasis->zet[iset * ibasis->maxpgf + ipgf];
254 task->zeta[1] = jbasis->zet[jset * jbasis->maxpgf + jpgf];
255
256 const double *ra = &ctx->atom_positions[3 * iatom];
257 const double zetp = task->zeta[0] + task->zeta[1];
258 const double f = task->zeta[1] / zetp;
259 const double rab2 = task->rab[0] * task->rab[0] +
260 task->rab[1] * task->rab[1] +
261 task->rab[2] * task->rab[2];
262
263 task->prefactor = exp(-task->zeta[0] * f * rab2);
264 task->zetp = zetp;
265
266 for (int i = 0; i < 3; i++) {
267 task->ra[i] = ra[i];
268 task->rp[i] = ra[i] + f * task->rab[i];
269 task->rb[i] = ra[i] + task->rab[i];
270 }
271
272 task->lmax[0] = ibasis->lmax[iset];
273 task->lmax[1] = jbasis->lmax[jset];
274 task->lmin[0] = ibasis->lmin[iset];
275 task->lmin[1] = jbasis->lmin[jset];
276
277 task->update_block_ = i == 0 || level_list[i] != level_list[i - 1] ||
278 block_num_list[i] != block_num_list[i - 1] ||
279 iset_list[i] != iset_list[i - 1] ||
280 jset_list[i] != jset_list[i - 1];
281
282 task->offset[0] = ipgf * ncoseta;
283 task->offset[1] = jpgf * ncosetb;
284 }
285
286 // Find largest Cartesian subblock size.
287 ctx->maxco = 0;
288 for (int i = 0; i < ctx->nkinds; i++) {
289 ctx->maxco = imax(ctx->maxco, ctx->basis_sets[i]->maxco);
290 }
291}
292
293void update_layouts(const int nlevels, const int npts_global[nlevels][3],
294 const int npts_local[nlevels][3],
295 const int shift_local[nlevels][3],
296 const int border_width[nlevels][3],
297 const double dh[nlevels][3][3],
298 const double dh_inv[nlevels][3][3], grid_context *ctx) {
299
300 assert(ctx != NULL);
301 assert(ctx->checksum == ctx_checksum);
302
303 if (ctx->layouts != NULL) {
304 free(ctx->layouts);
305 }
306
307 ctx->layouts = malloc(sizeof(_layout) * nlevels);
308
309 for (int level = 0; level < nlevels; level++) {
310 for (int i = 0; i < 3; i++) {
311 ctx->layouts[level].npts_global[i] = npts_global[level][i];
312 ctx->layouts[level].npts_local[i] = npts_local[level][i];
313 ctx->layouts[level].shift_local[i] = shift_local[level][i];
314 ctx->layouts[level].border_width[i] = border_width[level][i];
315 for (int j = 0; j < 3; j++) {
316 ctx->layouts[level].dh[i][j] = dh[level][i][j];
317 ctx->layouts[level].dh_inv[i][j] = dh_inv[level][i][j];
318 }
319 }
320 }
321}
322
323void update_grid(const int nlevels, grid_context *ctx) {
324 assert(ctx != NULL);
325 assert(ctx->checksum == ctx_checksum);
326
327 if (nlevels == 0)
328 return;
329
330 if (ctx->grid == NULL) {
331 ctx->grid = malloc(sizeof(tensor) * nlevels);
332 } else {
333 if (ctx->nlevels_total < nlevels) {
334 ctx->grid = realloc(ctx->grid, sizeof(tensor) * nlevels);
335 }
336 }
337 assert(ctx->grid != NULL);
338
339 ctx->nlevels_total = imax(ctx->nlevels_total, nlevels);
340 ctx->nlevels = nlevels;
341}
342
344 const bool orthorhombic, const int ntasks, const int nlevels,
345 const int natoms, const int nkinds, const int nblocks,
346 const int *block_offsets, const double atom_positions[natoms][3],
347 const int *const atom_kinds, const grid_basis_set **const basis_sets,
348 const int *const level_list, const int *const iatom_list,
349 const int *jatom_list, const int *const iset_list,
350 const int *const jset_list, const int *const ipgf_list,
351 const int *const jpgf_list, const int *const border_mask_list,
352 const int *block_num_list, const double *const radius_list,
353 const double rab_list[ntasks][3], const int npts_global[nlevels][3],
354 const int npts_local[nlevels][3], const int shift_local[nlevels][3],
355 const int border_width[nlevels][3], const double dh[nlevels][3][3],
356 const double dh_inv[nlevels][3][3]) {
357
358 grid_context *ctx = malloc(sizeof(grid_context));
359
360 memset(ctx, 0, sizeof(grid_context));
361
362 ctx->checksum = ctx_checksum;
363 ctx->orthorhombic = orthorhombic;
364 update_block_offsets(nblocks, block_offsets, ctx);
365 update_atoms_position(natoms, atom_positions, ctx);
366 update_atoms_kinds(natoms, atom_kinds, ctx);
367 update_basis_set(nkinds, basis_sets, ctx);
368 update_task_lists(nlevels, ntasks, level_list, iatom_list, jatom_list,
369 iset_list, jset_list, ipgf_list, jpgf_list,
370 border_mask_list, block_num_list, radius_list, rab_list,
371 ctx);
372 update_layouts(nlevels, npts_global, npts_local, shift_local, border_width,
373 dh, dh_inv, ctx);
374 update_grid(nlevels, ctx);
375
376 const int max_threads = omp_get_max_threads();
377
378 ctx->handler =
379 malloc(sizeof(struct collocation_integration_ *) * max_threads);
380
381 for (int i = 0; i < max_threads; i++) {
383 }
384
386
387 return ctx;
388}
389
391 const bool orthorhombic, const int ntasks, const int nlevels,
392 const int natoms, const int nkinds, const int nblocks,
393 const int *block_offsets, const double atom_positions[natoms][3],
394 const int *const atom_kinds, const grid_basis_set **const basis_sets,
395 const int *const level_list, const int *const iatom_list,
396 const int *jatom_list, const int *const iset_list,
397 const int *const jset_list, const int *const ipgf_list,
398 const int *const jpgf_list, const int *const border_mask_list,
399 const int *block_num_list, const double *const radius_list,
400 const double rab_list[ntasks][3], const int npts_global[nlevels][3],
401 const int npts_local[nlevels][3], const int shift_local[nlevels][3],
402 const int border_width[nlevels][3], const double dh[nlevels][3][3],
403 const double dh_inv[nlevels][3][3], void *ptr) {
404
405 assert(ptr != NULL);
406 grid_context *ctx = (grid_context *)ptr;
407 assert(ctx->checksum == ctx_checksum);
408
409 ctx->orthorhombic = orthorhombic;
410 update_block_offsets(nblocks, block_offsets, ctx);
411 update_atoms_position(natoms, atom_positions, ctx);
412 update_atoms_kinds(natoms, atom_kinds, ctx);
413 update_basis_set(nkinds, basis_sets, ctx);
414 update_task_lists(nlevels, ntasks, level_list, iatom_list, jatom_list,
415 iset_list, jset_list, ipgf_list, jpgf_list,
416 border_mask_list, block_num_list, radius_list, rab_list,
417 ctx);
418 update_layouts(nlevels, npts_global, npts_local, shift_local, border_width,
419 dh, dh_inv, ctx);
420 update_grid(nlevels, ctx);
421
422 // Find largest Cartesian subblock size.
423 ctx->maxco = 0;
424 for (int i = 0; i < nkinds; i++) {
425 ctx->maxco = imax(ctx->maxco, ctx->basis_sets[i]->maxco);
426 }
427}
428
429void initialize_grid_context_on_gpu(void *ptr, const int number_of_devices,
430 const int *device_id) {
431 assert(ptr != NULL);
432 grid_context *ctx = (grid_context *)ptr;
433 assert(ctx->checksum == ctx_checksum);
434 ctx->work_on_gpu = false;
435 if (number_of_devices <= 0) {
436 return;
437 }
438
439 ctx->number_of_devices = number_of_devices;
440 ctx->queue_length = 8192;
441 if (ctx->device_id == NULL) {
442 ctx->device_id = malloc(sizeof(int) * number_of_devices);
443 } else {
444 ctx->device_id = realloc(ctx->device_id, sizeof(int) * number_of_devices);
445 }
446 assert(ctx->device_id != NULL);
447
448 memcpy(ctx->device_id, device_id, sizeof(int) * number_of_devices);
449}
450
452 assert(ptr);
453 grid_context *ctx = (grid_context *)ptr;
454 assert(ctx->checksum == ctx_checksum);
455 free(ctx->block_offsets);
456 free(ctx->atom_positions);
457 free(ctx->atom_kinds);
458 free(ctx->basis_sets);
459 free(ctx->tasks[0]);
460 free(ctx->tasks);
461 free(ctx->tasks_per_level);
462 free(ctx->layouts);
463 free(ctx->grid);
464 if (ctx->device_id)
465 free(ctx->device_id);
466
467 if (ctx->handler) {
468 for (int i = 0; i < ctx->number_of_handler; i++) {
470 }
471 free(ctx->handler);
472 }
473
474 free(ctx);
475}
476
477void apply_cutoff(void *ptr) {
478 assert(ptr);
479 grid_context *ctx = (grid_context *)ptr;
480 assert(ctx->checksum == ctx_checksum);
481 ctx->apply_cutoff = true;
482}
483
485 tensor *grid, const bool orthorhombic,
486 const int grid_full_size[3], /* size of the full grid */
487 const int grid_local_size[3], /* size of the local grid block */
488 const int shift_local[3], /* coordinates of the lower coordinates of the
489 local grid window */
490 const int border_width[3], /* width of the borders */
491 const double
492 dh[3][3], /* displacement vectors of the grid (cartesian) -> (ijk) */
493 const double dh_inv[3][3], /* (ijk) -> (x,y,z) */
494 offload_buffer *grid_) {
495 memset(grid, 0, sizeof(tensor));
496 initialize_tensor_3(grid, grid_local_size[2], grid_local_size[1],
497 grid_local_size[0]);
498
499 grid->data = grid_->host_buffer;
500 grid->ld_ = grid_local_size[0];
501
502 setup_global_grid_size(grid, &grid_full_size[0]);
503
504 /* the grid is divided over several ranks or not periodic */
505 if ((grid_local_size[0] != grid_full_size[0]) ||
506 (grid_local_size[1] != grid_full_size[1]) ||
507 (grid_local_size[2] != grid_full_size[2])) {
508 setup_grid_window(grid, shift_local, border_width, 0);
509 } else {
510 grid->window_shift[0] = 0;
511 grid->window_shift[1] = 0;
512 grid->window_shift[2] = 0;
513
514 grid->window_size[0] = grid->size[0];
515 grid->window_size[1] = grid->size[1];
516 grid->window_size[2] = grid->size[2];
517 }
518
519 grid->dh[0][0] = dh[0][0];
520 grid->dh[0][1] = dh[0][1];
521 grid->dh[0][2] = dh[0][2];
522 grid->dh[1][0] = dh[1][0];
523 grid->dh[1][1] = dh[1][1];
524 grid->dh[1][2] = dh[1][2];
525 grid->dh[2][0] = dh[2][0];
526 grid->dh[2][1] = dh[2][1];
527 grid->dh[2][2] = dh[2][2];
528
529 grid->dh_inv[0][0] = dh_inv[0][0];
530 grid->dh_inv[0][1] = dh_inv[0][1];
531 grid->dh_inv[0][2] = dh_inv[0][2];
532 grid->dh_inv[1][0] = dh_inv[1][0];
533 grid->dh_inv[1][1] = dh_inv[1][1];
534 grid->dh_inv[1][2] = dh_inv[1][2];
535 grid->dh_inv[2][0] = dh_inv[2][0];
536 grid->dh_inv[2][1] = dh_inv[2][1];
537 grid->dh_inv[2][2] = dh_inv[2][2];
538
539 verify_orthogonality(dh, grid->orthogonal);
540
541 if (orthorhombic) {
542 grid->orthogonal[0] = true;
543 grid->orthogonal[1] = true;
544 grid->orthogonal[2] = true;
545 }
546}
547
548/*******************************************************************************
549 * \brief Allocates a task list for the dgemm backend.
550 * See grid_task_list.h for details.
551 ******************************************************************************/
553 const bool orthorhombic, const int ntasks, const int nlevels,
554 const int natoms, const int nkinds, const int nblocks,
555 const int block_offsets[nblocks], const double atom_positions[natoms][3],
556 const int atom_kinds[natoms], const grid_basis_set *basis_sets[nkinds],
557 const int level_list[ntasks], const int iatom_list[ntasks],
558 const int jatom_list[ntasks], const int iset_list[ntasks],
559 const int jset_list[ntasks], const int ipgf_list[ntasks],
560 const int jpgf_list[ntasks], const int border_mask_list[ntasks],
561 const int block_num_list[ntasks], const double radius_list[ntasks],
562 const double rab_list[ntasks][3], const int npts_global[nlevels][3],
563 const int npts_local[nlevels][3], const int shift_local[nlevels][3],
564 const int border_width[nlevels][3], const double dh[nlevels][3][3],
565 const double dh_inv[nlevels][3][3], grid_dgemm_task_list **task_list) {
566
567 if (*task_list == NULL) {
568 *task_list = create_grid_context_dgemm(
569 orthorhombic, ntasks, nlevels, natoms, nkinds, nblocks, block_offsets,
570 atom_positions, atom_kinds, basis_sets, level_list, iatom_list,
571 jatom_list, iset_list, jset_list, ipgf_list, jpgf_list,
572 border_mask_list, block_num_list, radius_list, rab_list, npts_global,
573 npts_local, shift_local, border_width, dh, dh_inv);
574 } else {
576 orthorhombic, ntasks, nlevels, natoms, nkinds, nblocks, block_offsets,
577 atom_positions, atom_kinds, basis_sets, level_list, iatom_list,
578 jatom_list, iset_list, jset_list, ipgf_list, jpgf_list,
579 border_mask_list, block_num_list, radius_list, rab_list, npts_global,
580 npts_local, shift_local, border_width, dh, dh_inv, *task_list);
581 }
582
584 if (config.apply_cutoff) {
585 apply_cutoff(*task_list);
586 }
587}
588
589/*******************************************************************************
590 * \brief Deallocates given task list, basis_sets have to be freed separately.
591 ******************************************************************************/
static int imax(int x, int y)
Returns the larger of two given integers (missing from the C standard)
static int max_threads
Definition dbm_library.c:24
static void const int const int const int const int const int const double const int const int const int int GRID_CONST_WHEN_COLLOCATE double GRID_CONST_WHEN_INTEGRATE double * grid
static void const int const int i
static void const int const int const int const int const int const double const int const int const int npts_local[3]
void collocate_destroy_handle(void *gaussian_handle)
struct collocation_integration_ * collocate_create_handle(void)
void update_layouts(const int nlevels, const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3], grid_context *ctx)
void update_grid(const int nlevels, grid_context *ctx)
void update_atoms_kinds(const int natoms, const int *atoms_kinds, grid_context *data)
void update_grid_context_dgemm(const bool orthorhombic, const int ntasks, const int nlevels, const int natoms, const int nkinds, const int nblocks, const int *block_offsets, const double atom_positions[natoms][3], const int *const atom_kinds, const grid_basis_set **const basis_sets, const int *const level_list, const int *const iatom_list, const int *jatom_list, const int *const iset_list, const int *const jset_list, const int *const ipgf_list, const int *const jpgf_list, const int *const border_mask_list, const int *block_num_list, const double *const radius_list, const double rab_list[ntasks][3], const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3], void *ptr)
void * create_grid_context_dgemm(const bool orthorhombic, const int ntasks, const int nlevels, const int natoms, const int nkinds, const int nblocks, const int *block_offsets, const double atom_positions[natoms][3], const int *const atom_kinds, const grid_basis_set **const basis_sets, const int *const level_list, const int *const iatom_list, const int *jatom_list, const int *const iset_list, const int *const jset_list, const int *const ipgf_list, const int *const jpgf_list, const int *const border_mask_list, const int *block_num_list, const double *const radius_list, const double rab_list[ntasks][3], const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3])
int is_grid_orthorhombic(void *const ptr)
void update_block_offsets(const int nblocks, const int *const block_offsets, grid_context *data)
int return_device_id(void *const ptr, const int device)
void destroy_grid_context_dgemm(void *ptr)
void return_dh(void *const ptr, const int level, double *const dh)
void update_task_lists(const int nlevels, const int ntasks, const int *const level_list, const int *const iatom_list, const int *const jatom_list, const int *const iset_list, const int *const jset_list, const int *const ipgf_list, const int *const jpgf_list, const int *const border_mask_list, const int *block_num_list, const double *const radius_list, const double rab_list[ntasks][3], grid_context *ctx)
void apply_cutoff(void *ptr)
int return_num_devs(void *const ptr)
void return_dh_inv(void *const ptr, const int level, double *const dh_inv)
void update_basis_set(const int nkinds, const grid_basis_set **const basis_sets, grid_context *data)
void grid_dgemm_create_task_list(const bool orthorhombic, const int ntasks, const int nlevels, const int natoms, const int nkinds, const int nblocks, const int block_offsets[nblocks], const double atom_positions[natoms][3], const int atom_kinds[natoms], const grid_basis_set *basis_sets[nkinds], const int level_list[ntasks], const int iatom_list[ntasks], const int jatom_list[ntasks], const int iset_list[ntasks], const int jset_list[ntasks], const int ipgf_list[ntasks], const int jpgf_list[ntasks], const int border_mask_list[ntasks], const int block_num_list[ntasks], const double radius_list[ntasks], const double rab_list[ntasks][3], const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3], grid_dgemm_task_list **task_list)
Allocates a task list for the dgemm backend. See grid_task_list.h for details.
void update_atoms_position(const int natoms, const double atoms_positions[natoms][3], grid_context *data)
void grid_dgemm_free_task_list(grid_dgemm_task_list *task_list)
Deallocates given task list, basis_sets have to be freed separately.
void update_queue_length(void *const ptr, const int queue_length)
void set_grid_parameters(tensor *grid, const bool orthorhombic, const int grid_full_size[3], const int grid_local_size[3], const int shift_local[3], const int border_width[3], const double dh[3][3], const double dh_inv[3][3], offload_buffer *grid_)
void initialize_grid_context_on_gpu(void *ptr, const int number_of_devices, const int *device_id)
static void setup_grid_window(tensor *const grid, const int *const shift_local, const int *const border_width, const int border_mask)
static void setup_global_grid_size(tensor *const grid, const int *const full_size)
static void initialize_tensor_3(struct tensor_ *a, int n1, int n2, int n3)
void verify_orthogonality(const double dh[3][3], bool orthogonal[3])
static grid_library_config config
grid_library_config grid_library_get_config(void)
Returns the library config.
double dh_inv[3][3]
Internal representation of a basis set.
grid_basis_set ** basis_sets
struct collocation_integration_ ** handler
Configuration of the grid library.
Internal representation of a buffer.
double * host_buffer
double dh[3][3]
double dh_inv[3][3]