26 const bool orthorhombic,
const int ntasks,
const int nlevels,
27 const int natoms,
const int nkinds,
const int nblocks,
28 const int block_offsets[nblocks],
const double atom_positions[natoms][3],
29 const int atom_kinds[natoms],
const grid_basis_set *basis_sets[nkinds],
30 const int level_list[ntasks],
const int iatom_list[ntasks],
31 const int jatom_list[ntasks],
const int iset_list[ntasks],
32 const int jset_list[ntasks],
const int ipgf_list[ntasks],
33 const int jpgf_list[ntasks],
const int border_mask_list[ntasks],
34 const int block_num_list[ntasks],
const double radius_list[ntasks],
35 const double rab_list[ntasks][3],
const int npts_global[nlevels][3],
36 const int npts_local[nlevels][3],
const int shift_local[nlevels][3],
37 const int border_width[nlevels][3],
const double dh[nlevels][3][3],
38 const double dh_inv[nlevels][3][3],
grid_task_list **task_list_out) {
44 if (*task_list_out == NULL) {
51 #if defined(__OFFLOAD_HIP) && !defined(__NO_OFFLOAD_GRID)
53 #elif defined(__OFFLOAD) && !defined(__NO_OFFLOAD_GRID)
63 task_list = *task_list_out;
69 size_t size = nlevels * 3 *
sizeof(int);
75 orthorhombic, ntasks, nlevels, natoms, nkinds, nblocks, block_offsets,
76 atom_positions, atom_kinds, basis_sets, level_list, iatom_list,
77 jatom_list, iset_list, jset_list, ipgf_list, jpgf_list, border_mask_list,
78 block_num_list, radius_list, rab_list, npts_global,
npts_local,
79 shift_local, border_width, dh, dh_inv, &task_list->
ref);
87 orthorhombic, ntasks, nlevels, natoms, nkinds, nblocks, block_offsets,
88 atom_positions, atom_kinds, basis_sets, level_list, iatom_list,
89 jatom_list, iset_list, jset_list, ipgf_list, jpgf_list,
90 border_mask_list, block_num_list, radius_list, rab_list, npts_global,
91 npts_local, shift_local, border_width, dh, dh_inv, &task_list->
cpu);
95 orthorhombic, ntasks, nlevels, natoms, nkinds, nblocks, block_offsets,
96 atom_positions, atom_kinds, basis_sets, level_list, iatom_list,
97 jatom_list, iset_list, jset_list, ipgf_list, jpgf_list,
98 border_mask_list, block_num_list, radius_list, rab_list, npts_global,
103 #if (defined(__OFFLOAD) && !defined(__NO_OFFLOAD_GRID))
104 grid_gpu_create_task_list(
105 orthorhombic, ntasks, nlevels, natoms, nkinds, nblocks, block_offsets,
106 atom_positions, atom_kinds, basis_sets, level_list, iatom_list,
107 jatom_list, iset_list, jset_list, ipgf_list, jpgf_list,
108 border_mask_list, block_num_list, radius_list, rab_list, npts_global,
109 npts_local, shift_local, border_width, dh, dh_inv, &task_list->gpu);
112 "Error: The GPU grid backend is not available. "
113 "Please re-compile with -D__OFFLOAD_CUDA or -D__OFFLOAD_HIP");
119 #if defined(__OFFLOAD_HIP) && !defined(__NO_OFFLOAD_GRID)
120 grid_hip_create_task_list(
121 orthorhombic, ntasks, nlevels, natoms, nkinds, nblocks, block_offsets,
122 &atom_positions[0][0], atom_kinds, basis_sets, level_list, iatom_list,
123 jatom_list, iset_list, jset_list, ipgf_list, jpgf_list,
124 border_mask_list, block_num_list, radius_list, &rab_list[0][0],
125 &npts_global[0][0], &
npts_local[0][0], &shift_local[0][0],
126 &border_width[0][0], &dh[0][0][0], &dh_inv[0][0][0], &task_list->hip);
128 fprintf(stderr,
"Error: The HIP grid backend is not available. "
129 "Please re-compile with -D__OFFLOAD_HIP");
135 printf(
"Error: Unknown grid backend: %i.\n",
config.
backend);
140 *task_list_out = task_list;
149 if (task_list->
ref != NULL) {
151 task_list->
ref = NULL;
153 if (task_list->
cpu != NULL) {
155 task_list->
cpu = NULL;
157 if (task_list->
dgemm != NULL) {
159 task_list->
dgemm = NULL;
161 #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_GRID)
162 if (task_list->gpu != NULL) {
163 grid_gpu_free_task_list(task_list->gpu);
164 task_list->gpu = NULL;
167 #if defined(__OFFLOAD_HIP) && !defined(__NO_OFFLOAD_GRID)
168 if (task_list->hip != NULL) {
169 grid_hip_free_task_list(task_list->hip);
170 task_list->hip = NULL;
184 const enum grid_func func,
const int nlevels,
190 assert(task_list->
nlevels == nlevels);
191 for (
int ilevel = 0; ilevel < nlevels; ilevel++) {
210 #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_GRID)
212 grid_gpu_collocate_task_list(task_list->gpu, func, nlevels, pab_blocks,
216 #if defined(__OFFLOAD_HIP) && !defined(__NO_OFFLOAD_GRID)
218 grid_hip_collocate_task_list(task_list->hip, func, nlevels, pab_blocks,
223 printf(
"Error: Unknown grid backend: %i.\n", task_list->
backend);
232 for (
int level = 0; level < nlevels; level++) {
233 const int npts_local_total =
235 grids_ref[level] = NULL;
244 const double tolerance = 1e-12;
245 double max_rel_diff = 0.0;
246 for (
int level = 0; level < nlevels; level++) {
248 for (
int j = 0; j <
npts_local[level][1]; j++) {
249 for (
int k = 0; k <
npts_local[level][2]; k++) {
253 const double test_value = grids[level]->host_buffer[
idx];
254 const double diff = fabs(test_value - ref_value);
255 const double rel_diff = diff / fmax(1.0, fabs(ref_value));
256 max_rel_diff = fmax(max_rel_diff, rel_diff);
257 if (rel_diff > tolerance) {
258 fprintf(stderr,
"Error: Validation failure in grid collocate\n");
259 fprintf(stderr,
" diff: %le\n", diff);
260 fprintf(stderr,
" rel_diff: %le\n", rel_diff);
261 fprintf(stderr,
" value: %le\n", ref_value);
262 fprintf(stderr,
" level: %i\n", level);
263 fprintf(stderr,
" ijk: %i %i %i\n",
i, j, k);
270 printf(
"Validated grid collocate, max rel. diff: %le\n", max_rel_diff);
281 const grid_task_list *task_list,
const bool compute_tau,
const int natoms,
282 const int nlevels,
const int npts_local[nlevels][3],
284 offload_buffer *hab_blocks,
double forces[natoms][3],
double virial[3][3]) {
287 assert(task_list->
nlevels == nlevels);
288 for (
int ilevel = 0; ilevel < nlevels; ilevel++) {
294 assert(forces == NULL || pab_blocks != NULL);
295 assert(virial == NULL || pab_blocks != NULL);
298 #if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_GRID)
300 grid_gpu_integrate_task_list(task_list->gpu, compute_tau, natoms, nlevels,
301 pab_blocks, grids, hab_blocks, forces, virial);
304 #if defined(__OFFLOAD_HIP) && !defined(__NO_OFFLOAD_GRID)
306 grid_hip_integrate_task_list(task_list->hip, compute_tau, nlevels,
307 pab_blocks, grids, hab_blocks, &forces[0][0],
313 nlevels, pab_blocks, grids, hab_blocks,
318 pab_blocks, grids, hab_blocks, forces, virial);
322 pab_blocks, grids, hab_blocks, forces, virial);
325 printf(
"Error: Unknown grid backend: %i.\n", task_list->
backend);
333 const int hab_length = hab_blocks->
size /
sizeof(double);
336 double forces_ref[natoms][3], virial_ref[3][3];
340 pab_blocks, grids, hab_blocks_ref,
341 (forces != NULL) ? forces_ref : NULL,
342 (virial != NULL) ? virial_ref : NULL);
345 const double hab_tolerance = 1e-12;
346 double hab_max_rel_diff = 0.0;
347 for (
int i = 0;
i < hab_length;
i++) {
348 const double ref_value = hab_blocks_ref->
host_buffer[
i];
350 const double diff = fabs(test_value - ref_value);
351 const double rel_diff = diff / fmax(1.0, fabs(ref_value));
352 hab_max_rel_diff = fmax(hab_max_rel_diff, rel_diff);
353 if (rel_diff > hab_tolerance) {
354 fprintf(stderr,
"Error: Validation failure in grid integrate\n");
355 fprintf(stderr,
" hab diff: %le\n", diff);
356 fprintf(stderr,
" hab rel_diff: %le\n", rel_diff);
357 fprintf(stderr,
" hab value: %le\n", ref_value);
358 fprintf(stderr,
" hab i: %i\n",
i);
364 const double forces_tolerance = 1e-8;
365 double forces_max_rel_diff = 0.0;
366 if (forces != NULL) {
367 for (
int iatom = 0; iatom < natoms; iatom++) {
368 for (
int idir = 0; idir < 3; idir++) {
369 const double ref_value = forces_ref[iatom][idir];
370 const double test_value = forces[iatom][idir];
371 const double diff = fabs(test_value - ref_value);
372 const double rel_diff = diff / fmax(1.0, fabs(ref_value));
373 forces_max_rel_diff = fmax(forces_max_rel_diff, rel_diff);
374 if (rel_diff > forces_tolerance) {
375 fprintf(stderr,
"Error: Validation failure in grid integrate\n");
376 fprintf(stderr,
" forces diff: %le\n", diff);
377 fprintf(stderr,
" forces rel_diff: %le\n", rel_diff);
378 fprintf(stderr,
" forces value: %le\n", ref_value);
379 fprintf(stderr,
" forces atom: %i\n", iatom);
380 fprintf(stderr,
" forces dir: %i\n", idir);
388 const double virial_tolerance = 1e-8;
389 double virial_max_rel_diff = 0.0;
390 if (virial != NULL) {
391 for (
int i = 0;
i < 3;
i++) {
392 for (
int j = 0; j < 3; j++) {
393 const double ref_value = virial_ref[
i][j];
394 const double test_value = virial[
i][j];
395 const double diff = fabs(test_value - ref_value);
396 const double rel_diff = diff / fmax(1.0, fabs(ref_value));
397 virial_max_rel_diff = fmax(virial_max_rel_diff, rel_diff);
398 if (rel_diff > virial_tolerance) {
399 fprintf(stderr,
"Error: Validation failure in grid integrate\n");
400 fprintf(stderr,
" virial diff: %le\n", diff);
401 fprintf(stderr,
" virial rel_diff: %le\n", rel_diff);
402 fprintf(stderr,
" virial value: %le\n", ref_value);
403 fprintf(stderr,
" virial ij: %i %i\n",
i, j);
410 printf(
"Validated grid_integrate, max rel. diff: %le %le %le\n",
411 hab_max_rel_diff, forces_max_rel_diff, virial_max_rel_diff);
static GRID_HOST_DEVICE int idx(const orbital a)
Return coset index of given orbital angular momentum.
static void const int const int i
static void const int const int const int const int const int const double const int const int const int npts_local[3]
void grid_cpu_free_task_list(grid_cpu_task_list *task_list)
Deallocates given task list, basis_sets have to be freed separately.
void grid_cpu_create_task_list(const bool orthorhombic, const int ntasks, const int nlevels, const int natoms, const int nkinds, const int nblocks, const int block_offsets[nblocks], const double atom_positions[natoms][3], const int atom_kinds[natoms], const grid_basis_set *basis_sets[nkinds], const int level_list[ntasks], const int iatom_list[ntasks], const int jatom_list[ntasks], const int iset_list[ntasks], const int jset_list[ntasks], const int ipgf_list[ntasks], const int jpgf_list[ntasks], const int border_mask_list[ntasks], const int block_num_list[ntasks], const double radius_list[ntasks], const double rab_list[ntasks][3], const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3], grid_cpu_task_list **task_list_out)
Allocates a task list for the cpu backend. See grid_task_list.h for details.
void grid_cpu_collocate_task_list(const grid_cpu_task_list *task_list, const enum grid_func func, const int nlevels, const offload_buffer *pab_blocks, offload_buffer *grids[nlevels])
Collocate all tasks of the given list onto the given grids. See grid_task_list.h for details.
void grid_cpu_integrate_task_list(const grid_cpu_task_list *task_list, const bool compute_tau, const int natoms, const int nlevels, const offload_buffer *pab_blocks, const offload_buffer *grids[nlevels], offload_buffer *hab_blocks, double forces[natoms][3], double virial[3][3])
Integrate all tasks of the given list from the given grids. See grid_task_list.h for details.
void grid_dgemm_collocate_task_list(grid_dgemm_task_list *const ptr, const enum grid_func func, const int nlevels, const offload_buffer *pab_blocks, offload_buffer *grids[nlevels])
Collocate all tasks of a given list onto given grids. See grid_task_list.h for details.
void grid_dgemm_create_task_list(const bool orthorhombic, const int ntasks, const int nlevels, const int natoms, const int nkinds, const int nblocks, const int block_offsets[nblocks], const double atom_positions[natoms][3], const int atom_kinds[natoms], const grid_basis_set *basis_sets[nkinds], const int level_list[ntasks], const int iatom_list[ntasks], const int jatom_list[ntasks], const int iset_list[ntasks], const int jset_list[ntasks], const int ipgf_list[ntasks], const int jpgf_list[ntasks], const int border_mask_list[ntasks], const int block_num_list[ntasks], const double radius_list[ntasks], const double rab_list[ntasks][3], const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3], grid_dgemm_task_list **task_list)
Allocates a task list for the dgemm backend. See grid_task_list.h for details.
void grid_dgemm_free_task_list(grid_dgemm_task_list *task_list)
Deallocates given task list, basis_sets have to be freed separately.
void grid_dgemm_integrate_task_list(void *ptr, const bool compute_tau, const int natoms, const int nlevels, const offload_buffer *const pab_blocks, offload_buffer *grids[nlevels], offload_buffer *hab_blocks, double forces[natoms][3], double virial[3][3])
Integrate all tasks of the given list from the given grids using matrix-matrix multiplication.
static grid_library_config config
grid_library_config grid_library_get_config(void)
Returns the library config.
void grid_ref_collocate_task_list(const grid_ref_task_list *task_list, const enum grid_func func, const int nlevels, const offload_buffer *pab_blocks, offload_buffer *grids[nlevels])
Collocate all tasks of the given list onto the given grids. See grid_task_list.h for details.
void grid_ref_free_task_list(grid_ref_task_list *task_list)
Deallocates given task list, basis_sets have to be freed separately.
void grid_ref_integrate_task_list(const grid_ref_task_list *task_list, const bool compute_tau, const int natoms, const int nlevels, const offload_buffer *pab_blocks, const offload_buffer *grids[nlevels], offload_buffer *hab_blocks, double forces[natoms][3], double virial[3][3])
Integrate all tasks of the given list from the given grids. See grid_task_list.h for details.
void grid_ref_create_task_list(const bool orthorhombic, const int ntasks, const int nlevels, const int natoms, const int nkinds, const int nblocks, const int block_offsets[nblocks], const double atom_positions[natoms][3], const int atom_kinds[natoms], const grid_basis_set *basis_sets[nkinds], const int level_list[ntasks], const int iatom_list[ntasks], const int jatom_list[ntasks], const int iset_list[ntasks], const int jset_list[ntasks], const int ipgf_list[ntasks], const int jpgf_list[ntasks], const int border_mask_list[ntasks], const int block_num_list[ntasks], const double radius_list[ntasks], const double rab_list[ntasks][3], const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3], grid_ref_task_list **task_list_out)
Allocates a task list for the reference backend. See grid_task_list.h for details.
void grid_create_task_list(const bool orthorhombic, const int ntasks, const int nlevels, const int natoms, const int nkinds, const int nblocks, const int block_offsets[nblocks], const double atom_positions[natoms][3], const int atom_kinds[natoms], const grid_basis_set *basis_sets[nkinds], const int level_list[ntasks], const int iatom_list[ntasks], const int jatom_list[ntasks], const int iset_list[ntasks], const int jset_list[ntasks], const int ipgf_list[ntasks], const int jpgf_list[ntasks], const int border_mask_list[ntasks], const int block_num_list[ntasks], const double radius_list[ntasks], const double rab_list[ntasks][3], const int npts_global[nlevels][3], const int npts_local[nlevels][3], const int shift_local[nlevels][3], const int border_width[nlevels][3], const double dh[nlevels][3][3], const double dh_inv[nlevels][3][3], grid_task_list **task_list_out)
Allocates a task list which can be passed to grid_collocate_task_list. See grid_task_list....
void grid_collocate_task_list(const grid_task_list *task_list, const enum grid_func func, const int nlevels, const int npts_local[nlevels][3], const offload_buffer *pab_blocks, offload_buffer *grids[nlevels])
Collocate all tasks of the given list onto the given grids. See grid_task_list.h for details.
void grid_free_task_list(grid_task_list *task_list)
Deallocates given task list, basis_sets have to be freed separately.
void grid_integrate_task_list(const grid_task_list *task_list, const bool compute_tau, const int natoms, const int nlevels, const int npts_local[nlevels][3], const offload_buffer *pab_blocks, const offload_buffer *grids[nlevels], offload_buffer *hab_blocks, double forces[natoms][3], double virial[3][3])
Integrate all tasks of the given list from the given grids. See grid_task_list.h for details.
subroutine, public offload_free_buffer(buffer)
Deallocates given buffer.
subroutine, public offload_create_buffer(length, buffer)
Allocates a buffer of the given length, i.e., the number of elements.
Internal representation of a basis set.
Configuration of the grid library.
enum grid_backend backend
Internal representation of a task list, abstracting various backends.
grid_dgemm_task_list * dgemm
Internal representation of a buffer.