#ifndef GRID_GPU_TASK_LIST_H
#define GRID_GPU_TASK_LIST_H

#include "../../offload/offload_runtime.h"
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_GRID)

#include <stdbool.h>

#include "../../offload/offload_buffer.h"
#include "../common/grid_basis_set.h"
#include "../common/grid_constants.h"
/*******************************************************************************
 * \brief Internal representation of a single grid task on the GPU backend.
 *
 * NOTE(review): this extraction elided several fields of the upstream struct
 * (between block_transposed and off_diag_twice, and around the cube fields).
 * The elision points are marked below — confirm the full field layout against
 * the upstream header before relying on sizeof/offsets of this type.
 ******************************************************************************/
typedef struct grid_gpu_task_struct {
  bool use_orthorhombic_kernel; /* selects the orthorhombic kernel variant */
  bool block_transposed;        /* presumably set when the pab block is stored
                                   transposed — TODO confirm against caller */
  /* ... fields elided by extraction ... */
  double off_diag_twice; /* scale factor; name suggests 2.0 for off-diagonal
                            blocks and 1.0 otherwise — TODO confirm */
  /* ... fields elided by extraction ... */
  int cube_center_shifted[3]; /* cube center in (shifted) local grid indices */
  double cube_offset[3];      /* offset of the cube origin from that center */
} grid_gpu_task;
110 grid_gpu_layout *layouts;
111 int *tasks_per_level;
112 offloadStream_t *level_streams;
113 offloadStream_t main_stream;
118 grid_gpu_task *tasks_dev;
126void grid_gpu_create_task_list(
127 const bool orthorhombic,
const int ntasks,
const int nlevels,
128 const int natoms,
const int nkinds,
const int nblocks,
129 const int block_offsets[],
const double atom_positions[][3],
131 const int level_list[],
const int iatom_list[],
const int jatom_list[],
132 const int iset_list[],
const int jset_list[],
const int ipgf_list[],
133 const int jpgf_list[],
const int border_mask_list[],
134 const int block_num_list[],
const double radius_list[],
135 const double rab_list[][3],
const int npts_global[][3],
136 const int npts_local[][3],
const int shift_local[][3],
137 const int border_width[][3],
const double dh[][3][3],
138 const double dh_inv[][3][3], grid_gpu_task_list **task_list);
144void grid_gpu_free_task_list(grid_gpu_task_list *task_list);
151void grid_gpu_collocate_task_list(
const grid_gpu_task_list *task_list,
152 const enum grid_func func,
const int nlevels,
161void grid_gpu_integrate_task_list(
const grid_gpu_task_list *task_list,
162 const bool compute_tau,
const int natoms,
167 double forces[][3],
double virial[3][3]);
/* NOTE(review): extraction residue normalized into this comment — the three
 * trailing lines were (1) a mangled fragment of a declaration already listed
 * above and (2)-(3) doxygen briefs belonging to the included headers:
 *   grid_basis_set.h — "Internal representation of a basis set."
 *   offload_buffer.h — "Internal representation of a buffer."
 * The upstream header ends by closing both open conditionals: */
#endif /* defined(__OFFLOAD) && !defined(__NO_OFFLOAD_GRID) */
#endif /* GRID_GPU_TASK_LIST_H */