qs_integrate_potential_product.F
!--------------------------------------------------------------------------------------------------!
!   CP2K: A general program to perform molecular dynamics simulations                               !
!   Copyright 2000-2024 CP2K developers group <https://cp2k.org>                                    !
!                                                                                                   !
!   SPDX-License-Identifier: GPL-2.0-or-later                                                       !
!--------------------------------------------------------------------------------------------------!

! **************************************************************************************************
!> \brief Build up the plane wave density by collocating the primitive Gaussian
!>        functions (pgf).
!> \par History
!>      Joost VandeVondele (02.2002)
!>           1) rewrote collocate_pgf for increased accuracy and speed
!>           2) collocate_core hack for PGI compiler
!>           3) added multiple grid feature
!>           4) new way to go over the grid
!>      Joost VandeVondele (05.2002)
!>           1) prelim. introduction of the real space grid type
!>      JGH [30.08.02] multigrid arrays independent from potential
!>      JGH [17.07.03] distributed real space code
!>      JGH [23.11.03] refactoring and new loop ordering
!>      JGH [04.12.03] OpenMP parallelization of main loops
!>      Joost VandeVondele (12.2003)
!>           1) modified to compute tau
!>      Joost removed incremental build feature
!>      Joost introduced map consistent
!>      Rewrote grid integration/collocation routines, [Joost VandeVondele, 03.2007]
!>      JGH [26.06.15] modification to allow for k-points
!> \author Matthias Krack (03.04.2001)
! **************************************************************************************************
MODULE qs_integrate_potential_product
   USE ao_util,              ONLY: exp_radius_very_extended
   USE atomic_kind_types,    ONLY: atomic_kind_type, get_atomic_kind_set
   USE basis_set_types,      ONLY: get_gto_basis_set, gto_basis_set_type
   USE block_p_types,        ONLY: block_p_type
   USE cell_types,           ONLY: cell_type, pbc
   USE cp_control_types,     ONLY: dft_control_type
   USE cp_dbcsr_operations,  ONLY: dbcsr_deallocate_matrix_set
   USE dbcsr_api,            ONLY: dbcsr_copy, dbcsr_finalize, dbcsr_get_block_p, &
                                   dbcsr_p_type, dbcsr_type
   USE gaussian_gridlevels,  ONLY: gridlevel_info_type
   USE grid_api,             ONLY: grid_integrate_task_list, integrate_pgf_product
   USE kinds,                ONLY: default_string_length, dp
   USE message_passing,      ONLY: mp_comm_type
   USE orbital_pointers,     ONLY: ncoset
   USE particle_types,       ONLY: particle_type
   USE pw_env_types,         ONLY: pw_env_get, pw_env_type
   USE pw_types,             ONLY: pw_r3d_rs_type
   USE qs_environment_types, ONLY: get_qs_env, qs_environment_type
   USE qs_force_types,       ONLY: qs_force_type
   USE qs_kind_types,        ONLY: get_qs_kind, get_qs_kind_set, qs_kind_type
   USE realspace_grid_types, ONLY: realspace_grid_desc_p_type, realspace_grid_type
   USE rs_pw_interface,      ONLY: potential_pw2rs
   USE task_list_methods,    ONLY: rs_copy_to_buffer, rs_copy_to_matrices, &
                                   rs_distribute_matrix, rs_gather_matrices, &
                                   rs_scatter_matrices
   USE task_list_types,      ONLY: atom_pair_type, task_list_type, task_type
   USE virial_types,         ONLY: virial_type

!$ USE OMP_LIB, ONLY: omp_get_max_threads, omp_get_thread_num, omp_get_num_threads
!$ USE OMP_LIB, ONLY: omp_lock_kind, &
!$                    omp_init_lock, omp_set_lock, &
!$                    omp_unset_lock, omp_destroy_lock
#include "./base/base_uses.f90"

   IMPLICIT NONE

   PRIVATE

   CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'qs_integrate_potential_product'

! *** Public subroutines ***
! *** Do not call these routines directly; use the interface provided by
! *** qs_integrate_potential

   PUBLIC :: integrate_v_rspace
   PUBLIC :: integrate_v_dbasis

CONTAINS

! **************************************************************************************************
!> \brief Integrate a potential v_rspace over the derivatives of the basis functions
!>        < da/dR | V | b > + < a | V | db/dR >
!>        Adapted from the old version of integrate_v_rspace (ED)
!> \param v_rspace ...
!> \param matrix_vhxc_dbasis ...
!> \param matrix_p ...
!> \param qs_env ...
!> \param lambda The atom index.
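!> \note
!>      A minimal calling sketch (a hedged illustration: my_atom is a placeholder
!>      name, and in practice this routine is reached through the
!>      qs_integrate_potential interface rather than called directly):
!>        ! d<a|V|b>/dR integrals for the atom with index my_atom
!>        CALL integrate_v_dbasis(v_rspace=v_hartree_rspace, &
!>                                matrix_vhxc_dbasis=matrix_vhxc_dbasis, &
!>                                matrix_p=matrix_p, qs_env=qs_env, &
!>                                lambda=my_atom)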
! **************************************************************************************************
   SUBROUTINE integrate_v_dbasis(v_rspace, matrix_vhxc_dbasis, matrix_p, qs_env, lambda)
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: v_rspace
      TYPE(dbcsr_p_type), DIMENSION(:), POINTER          :: matrix_vhxc_dbasis
      TYPE(dbcsr_type), POINTER                          :: matrix_p
      TYPE(qs_environment_type), POINTER                 :: qs_env
      INTEGER, INTENT(IN)                                :: lambda

      CHARACTER(len=*), PARAMETER :: routineN = 'integrate_v_dbasis'

      INTEGER :: bcol, brow, handle, i, iatom, igrid_level, ikind, ikind_old, ilevel, img, ipair, &
         ipgf, ipgf_new, iset, iset_new, iset_old, itask, ithread, jatom, jkind, jkind_old, jpgf, &
         jpgf_new, jset, jset_new, jset_old, maxco, maxpgf, maxset, maxsgf_set, na1, na2, natom, &
         nb1, nb2, ncoa, ncob, nimages, nkind, nseta, nsetb, nthread, sgfa, sgfb
      INTEGER, ALLOCATABLE, DIMENSION(:, :)              :: block_touched
      INTEGER, DIMENSION(:), POINTER                     :: la_max, la_min, lb_max, lb_min, npgfa, &
                                                            npgfb, nsgfa, nsgfb
      INTEGER, DIMENSION(:, :), POINTER                  :: first_sgfa, first_sgfb
      LOGICAL :: atom_pair_changed, atom_pair_done, dh_duplicated, distributed_grids, found, &
         my_compute_tau, new_set_pair_coming, pab_required, scatter, use_subpatch
      REAL(kind=dp)                                      :: eps_rho_rspace, f, prefactor, radius, &
                                                            scalef, zetp
      REAL(kind=dp), DIMENSION(3)                        :: force_a, force_b, ra, rab, rab_inv, rb, &
                                                            rp
      REAL(kind=dp), DIMENSION(3, 3)                     :: my_virial_a, my_virial_b
      REAL(kind=dp), DIMENSION(:), POINTER               :: set_radius_a, set_radius_b
      REAL(kind=dp), DIMENSION(:, :), POINTER            :: h_block, hab, p_block, pab, rpgfa, &
                                                            rpgfb, sphi_a, sphi_b, work, zeta, zetb
      REAL(kind=dp), DIMENSION(:, :, :), POINTER         :: habt, hadb, hdab, pabt, workt
      REAL(kind=dp), DIMENSION(:, :, :, :), POINTER      :: hadbt, hdabt
      TYPE(atom_pair_type), DIMENSION(:), POINTER        :: atom_pair_recv, atom_pair_send
      TYPE(atomic_kind_type), DIMENSION(:), POINTER      :: atomic_kind_set
      TYPE(block_p_type), ALLOCATABLE, DIMENSION(:)      :: vhxc_block
      TYPE(cell_type), POINTER                           :: cell
      TYPE(dbcsr_p_type), DIMENSION(:), POINTER          :: deltap
      TYPE(dft_control_type), POINTER                    :: dft_control
      TYPE(gridlevel_info_type), POINTER                 :: gridlevel_info
      TYPE(gto_basis_set_type), POINTER                  :: orb_basis_set
      TYPE(mp_comm_type)                                 :: group
      TYPE(particle_type), DIMENSION(:), POINTER         :: particle_set
      TYPE(pw_env_type), POINTER                         :: pw_env
      TYPE(qs_force_type), DIMENSION(:), POINTER         :: force
      TYPE(qs_kind_type), DIMENSION(:), POINTER          :: qs_kind_set
      TYPE(realspace_grid_desc_p_type), DIMENSION(:), &
         POINTER                                         :: rs_descs
      TYPE(realspace_grid_type), DIMENSION(:), POINTER   :: rs_rho
      TYPE(task_list_type), POINTER                      :: task_list
      TYPE(task_type), DIMENSION(:), POINTER             :: tasks

      CALL timeset(routineN, handle)
      NULLIFY (pw_env)

      ! get the task lists
      CALL get_qs_env(qs_env=qs_env, task_list=task_list)
      CPASSERT(ASSOCIATED(task_list))

      ! the information on the grids is provided through pw_env
      ! pw_env has to be the parent env for the potential grid (input)
      ! there is an option to provide an external grid
      CALL get_qs_env(qs_env=qs_env, pw_env=pw_env)

      ! *** assign from pw_env
      gridlevel_info => pw_env%gridlevel_info

      ! get all the general information on the system we are working on
      CALL get_qs_env(qs_env=qs_env, &
                      atomic_kind_set=atomic_kind_set, &
                      qs_kind_set=qs_kind_set, &
                      cell=cell, &
                      natom=natom, &
                      dft_control=dft_control, &
                      particle_set=particle_set)

      ! GAPW not implemented here
      CPASSERT(.NOT. dft_control%qs_control%gapw)

      ! *** set up the rs multi-grids
      CPASSERT(ASSOCIATED(pw_env))
      CALL pw_env_get(pw_env, rs_descs=rs_descs, rs_grids=rs_rho)
      distributed_grids = .FALSE.
      DO igrid_level = 1, gridlevel_info%ngrid_levels
         IF (rs_rho(igrid_level)%desc%distributed) distributed_grids = .TRUE.
      END DO
      ! get mpi group from rs_rho
      group = rs_rho(1)%desc%group

      ! transform the potential on the rs_multigrids
      CALL potential_pw2rs(rs_rho, v_rspace, pw_env)

      nkind = SIZE(qs_kind_set)
      nimages = dft_control%nimages

      CALL get_qs_kind_set(qs_kind_set=qs_kind_set, &
                           maxco=maxco, &
                           maxsgf_set=maxsgf_set, &
                           basis_type="ORB")

      ! short cuts to task list variables
      tasks => task_list%tasks
      atom_pair_send => task_list%atom_pair_send
      atom_pair_recv => task_list%atom_pair_recv

      ! needs to be consistent with rho_rspace
      eps_rho_rspace = dft_control%qs_control%eps_rho_rspace

      ! *** Initialize working density matrix ***
      ! distributed rs grids require a matrix that will be changed
      ! whereas this is not the case for replicated grids
      ALLOCATE (deltap(nimages))
      IF (distributed_grids) THEN
         ! this matrix has no strict sparsity pattern in parallel
         ! deltap%sparsity_id=-1
         CALL dbcsr_copy(deltap(1)%matrix, matrix_p, name="DeltaP")
      ELSE
         deltap(1)%matrix => matrix_p
      END IF
      nthread = 1
!$    nthread = omp_get_max_threads()

      ! *** Allocate work storage ***
      NULLIFY (pabt, habt, workt)
      ALLOCATE (habt(maxco, maxco, 0:nthread))
      ALLOCATE (workt(maxco, maxsgf_set, 0:nthread))
      ALLOCATE (hdabt(3, maxco, maxco, 0:nthread))
      ALLOCATE (hadbt(3, maxco, maxco, 0:nthread))
      ALLOCATE (pabt(maxco, maxco, 0:nthread))

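      ! for distributed grids every process only works on its local tasks, so the
      ! density matrix blocks are first redistributed according to the task list
      ! (atom_pair_send/atom_pair_recv); replicated grids can use the matrix as is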
      IF (distributed_grids) THEN
         CALL rs_distribute_matrix(rs_descs, deltap, atom_pair_send, atom_pair_recv, &
                                   nimages, scatter=.TRUE.)
      END IF

!$OMP PARALLEL DEFAULT(NONE), &
!$OMP          SHARED(workt,habt,hdabt,hadbt,pabt,tasks,particle_set,natom,maxset), &
!$OMP          SHARED(maxpgf,matrix_vhxc_dbasis,deltap), &
!$OMP          SHARED(pab_required,ncoset,rs_rho,my_compute_tau), &
!$OMP          SHARED(eps_rho_rspace,force,cell), &
!$OMP          SHARED(gridlevel_info,task_list,block_touched,nthread,qs_kind_set), &
!$OMP          SHARED(nimages,lambda,dh_duplicated), &
!$OMP          PRIVATE(ithread,work,hab,hdab,hadb,pab,iset_old,jset_old), &
!$OMP          PRIVATE(ikind_old,jkind_old,iatom,jatom,iset,jset,ikind,jkind,ilevel,ipgf,jpgf), &
!$OMP          PRIVATE(img,brow,bcol,orb_basis_set,first_sgfa,la_max,la_min,npgfa,nseta,nsgfa), &
!$OMP          PRIVATE(rpgfa,set_radius_a,sphi_a,zeta,first_sgfb,lb_max,lb_min,npgfb), &
!$OMP          PRIVATE(nsetb,nsgfb,rpgfb,set_radius_b,sphi_b,zetb,found), &
!$OMP          PRIVATE(force_a,force_b,my_virial_a,my_virial_b,atom_pair_changed,h_block,vhxc_block), &
!$OMP          PRIVATE(p_block,ncoa,sgfa,ncob,sgfb,rab,ra,rb,rp,zetp,f,prefactor,radius,igrid_level), &
!$OMP          PRIVATE(na1,na2,nb1,nb2,use_subpatch,rab_inv,new_set_pair_coming,atom_pair_done), &
!$OMP          PRIVATE(iset_new,jset_new,ipgf_new,jpgf_new,scalef), &
!$OMP          PRIVATE(itask)

      IF (.NOT. ALLOCATED(vhxc_block)) ALLOCATE (vhxc_block(3))

      ithread = 0
!$    ithread = omp_get_thread_num()
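      ! each thread operates on its own slice of the shared work arrays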
      work => workt(:, :, ithread)
      hab => habt(:, :, ithread)
      pab => pabt(:, :, ithread)
      hdab => hdabt(:, :, :, ithread)
      hadb => hadbt(:, :, :, ithread)

      iset_old = -1; jset_old = -1
      ikind_old = -1; jkind_old = -1

      ! Here we loop over gridlevels first, finalising the matrix after each grid level is
      ! completed. On each grid level, we loop over atom pairs, which will only access
      ! a single block of each matrix, so with OpenMP, each matrix block is only touched
      ! by a single thread for each grid level
      loop_gridlevels: DO igrid_level = 1, gridlevel_info%ngrid_levels
!$OMP BARRIER
!$OMP DO schedule (dynamic, MAX(1,task_list%npairs(igrid_level)/(nthread*50)))
         loop_pairs: DO ipair = 1, task_list%npairs(igrid_level)
         loop_tasks: DO itask = task_list%taskstart(ipair, igrid_level), task_list%taskstop(ipair, igrid_level)
            ilevel = tasks(itask)%grid_level
            img = tasks(itask)%image
            iatom = tasks(itask)%iatom
            jatom = tasks(itask)%jatom
            iset = tasks(itask)%iset
            jset = tasks(itask)%jset
            ipgf = tasks(itask)%ipgf
            jpgf = tasks(itask)%jpgf

            ! At the start of a block of tasks, get atom data (and kind data, if needed)
            IF (itask .EQ. task_list%taskstart(ipair, igrid_level)) THEN

               ikind = particle_set(iatom)%atomic_kind%kind_number
               jkind = particle_set(jatom)%atomic_kind%kind_number

               ra(:) = pbc(particle_set(iatom)%r, cell)

               IF (iatom <= jatom) THEN
                  brow = iatom
                  bcol = jatom
               ELSE
                  brow = jatom
                  bcol = iatom
               END IF

               IF (ikind .NE. ikind_old) THEN
                  CALL get_qs_kind(qs_kind_set(ikind), &
                                   basis_set=orb_basis_set, basis_type="ORB")

                  CALL get_gto_basis_set(gto_basis_set=orb_basis_set, &
                                         first_sgf=first_sgfa, &
                                         lmax=la_max, &
                                         lmin=la_min, &
                                         npgf=npgfa, &
                                         nset=nseta, &
                                         nsgf_set=nsgfa, &
                                         pgf_radius=rpgfa, &
                                         set_radius=set_radius_a, &
                                         sphi=sphi_a, &
                                         zet=zeta)
               END IF

               IF (jkind .NE. jkind_old) THEN
                  CALL get_qs_kind(qs_kind_set(jkind), &
                                   basis_set=orb_basis_set, basis_type="ORB")
                  CALL get_gto_basis_set(gto_basis_set=orb_basis_set, &
                                         first_sgf=first_sgfb, &
                                         lmax=lb_max, &
                                         lmin=lb_min, &
                                         npgf=npgfb, &
                                         nset=nsetb, &
                                         nsgf_set=nsgfb, &
                                         pgf_radius=rpgfb, &
                                         set_radius=set_radius_b, &
                                         sphi=sphi_b, &
                                         zet=zetb)

               END IF

               DO i = 1, 3
                  NULLIFY (vhxc_block(i)%block)
                  CALL dbcsr_get_block_p(matrix_vhxc_dbasis(i)%matrix, brow, bcol, vhxc_block(i)%block, found)
                  CPASSERT(found)
               END DO

               CALL dbcsr_get_block_p(matrix=deltap(img)%matrix, &
                                      row=brow, col=bcol, block=p_block, found=found)
               CPASSERT(found)

               ikind_old = ikind
               jkind_old = jkind

               atom_pair_changed = .TRUE.

            ELSE

               atom_pair_changed = .FALSE.

            END IF

            IF (atom_pair_changed .OR. iset_old .NE. iset .OR. jset_old .NE. jset) THEN

               ncoa = npgfa(iset)*ncoset(la_max(iset))
               sgfa = first_sgfa(1, iset)
               ncob = npgfb(jset)*ncoset(lb_max(jset))
               sgfb = first_sgfb(1, jset)

               IF (iatom <= jatom) THEN
                  work(1:ncoa, 1:nsgfb(jset)) = MATMUL(sphi_a(1:ncoa, sgfa:sgfa + nsgfa(iset) - 1), &
                                                       p_block(sgfa:sgfa + nsgfa(iset) - 1, sgfb:sgfb + nsgfb(jset) - 1))
                  pab(1:ncoa, 1:ncob) = MATMUL(work(1:ncoa, 1:nsgfb(jset)), TRANSPOSE(sphi_b(1:ncob, sgfb:sgfb + nsgfb(jset) - 1)))
               ELSE
                  work(1:ncob, 1:nsgfa(iset)) = MATMUL(sphi_b(1:ncob, sgfb:sgfb + nsgfb(jset) - 1), &
                                                       p_block(sgfb:sgfb + nsgfb(jset) - 1, sgfa:sgfa + nsgfa(iset) - 1))
                  pab(1:ncob, 1:ncoa) = MATMUL(work(1:ncob, 1:nsgfa(iset)), TRANSPOSE(sphi_a(1:ncoa, sgfa:sgfa + nsgfa(iset) - 1)))
               END IF

               IF (iatom <= jatom) THEN
                  hab(1:ncoa, 1:ncob) = 0._dp
                  hdab(:, 1:ncoa, 1:ncob) = 0._dp
                  hadb(:, 1:ncoa, 1:ncob) = 0._dp
               ELSE
                  hab(1:ncob, 1:ncoa) = 0._dp
                  hdab(:, 1:ncob, 1:ncoa) = 0._dp
                  hadb(:, 1:ncob, 1:ncoa) = 0._dp
               END IF

               iset_old = iset
               jset_old = jset

            END IF

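            ! Gaussian product rule: the product of the two primitives with
            ! exponents zeta and zetb centred at ra and rb = ra + rab is a single
            ! Gaussian with exponent zetp = zeta + zetb centred at rp = ra + f*rab,
            ! scaled by the prefactor EXP(-zeta*f*|rab|**2)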
            rab = tasks(itask)%rab
            rb(:) = ra(:) + rab(:)
            zetp = zeta(ipgf, iset) + zetb(jpgf, jset)

            f = zetb(jpgf, jset)/zetp
            rp(:) = ra(:) + f*rab(:)
            prefactor = EXP(-zeta(ipgf, iset)*f*DOT_PRODUCT(rab, rab))
            radius = exp_radius_very_extended(la_min=la_min(iset), la_max=la_max(iset), &
                                              lb_min=lb_min(jset), lb_max=lb_max(jset), &
                                              ra=ra, rb=rb, rp=rp, &
                                              zetp=zetp, eps=eps_rho_rspace, &
                                              prefactor=prefactor, cutoff=1.0_dp)

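            ! offsets of the current primitive pair within the cartesian
            ! (set x set) work arrays hab/hdab/hadb/pab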
            na1 = (ipgf - 1)*ncoset(la_max(iset)) + 1
            na2 = ipgf*ncoset(la_max(iset))
            nb1 = (jpgf - 1)*ncoset(lb_max(jset)) + 1
            nb2 = jpgf*ncoset(lb_max(jset))

            ! check whether we need to use fawzi's generalised collocation scheme
            IF (rs_rho(igrid_level)%desc%distributed) THEN
               ! tasks%dist_type is 0 for replicated, 1 for distributed and
               ! 2 for exceptional distributed tasks
               IF (tasks(itask)%dist_type .EQ. 2) THEN
                  use_subpatch = .TRUE.
               ELSE
                  use_subpatch = .FALSE.
               END IF
            ELSE
               use_subpatch = .FALSE.
            END IF

            IF (iatom <= jatom) THEN
               IF (iatom == lambda) &
                  CALL integrate_pgf_product( &
                  la_max(iset), zeta(ipgf, iset), la_min(iset), &
                  lb_max(jset), zetb(jpgf, jset), lb_min(jset), &
                  ra, rab, rs_rho(igrid_level), &
                  hab, o1=na1 - 1, o2=nb1 - 1, &
                  radius=radius, &
                  calculate_forces=.TRUE., &
                  compute_tau=.FALSE., &
                  use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
                  hdab=hdab, pab=pab)
               IF (jatom == lambda) &
                  CALL integrate_pgf_product( &
                  la_max(iset), zeta(ipgf, iset), la_min(iset), &
                  lb_max(jset), zetb(jpgf, jset), lb_min(jset), &
                  ra, rab, rs_rho(igrid_level), &
                  hab, o1=na1 - 1, o2=nb1 - 1, &
                  radius=radius, &
                  calculate_forces=.TRUE., &
                  compute_tau=.FALSE., &
                  use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
                  hadb=hadb, pab=pab)
            ELSE
               rab_inv = -rab
               IF (iatom == lambda) &
                  CALL integrate_pgf_product( &
                  lb_max(jset), zetb(jpgf, jset), lb_min(jset), &
                  la_max(iset), zeta(ipgf, iset), la_min(iset), &
                  rb, rab_inv, rs_rho(igrid_level), &
                  hab, o1=nb1 - 1, o2=na1 - 1, &
                  radius=radius, &
                  calculate_forces=.TRUE., &
                  force_a=force_b, force_b=force_a, &
                  compute_tau=.FALSE., &
                  use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
                  hadb=hadb, pab=pab)
               IF (jatom == lambda) &
                  CALL integrate_pgf_product( &
                  lb_max(jset), zetb(jpgf, jset), lb_min(jset), &
                  la_max(iset), zeta(ipgf, iset), la_min(iset), &
                  rb, rab_inv, rs_rho(igrid_level), &
                  hab, o1=nb1 - 1, o2=na1 - 1, &
                  radius=radius, &
                  calculate_forces=.TRUE., &
                  force_a=force_b, force_b=force_a, &
                  compute_tau=.FALSE., &
                  use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
                  hdab=hdab, pab=pab)
            END IF

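            ! peek at the next task: the accumulated cartesian blocks are only
            ! contracted and added to the matrix once the current set pair
            ! (or atom pair) is complete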
            new_set_pair_coming = .FALSE.
            atom_pair_done = .FALSE.
            IF (itask < task_list%taskstop(ipair, igrid_level)) THEN
               ilevel = tasks(itask + 1)%grid_level
               img = tasks(itask + 1)%image
               iatom = tasks(itask + 1)%iatom
               jatom = tasks(itask + 1)%jatom
               iset_new = tasks(itask + 1)%iset
               jset_new = tasks(itask + 1)%jset
               ipgf_new = tasks(itask + 1)%ipgf
               jpgf_new = tasks(itask + 1)%jpgf
               IF (iset_new .NE. iset .OR. jset_new .NE. jset) THEN
                  new_set_pair_coming = .TRUE.
               END IF
            ELSE
               ! do not forget the last block
               new_set_pair_coming = .TRUE.
               atom_pair_done = .TRUE.
            END IF

            IF (new_set_pair_coming) THEN

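               ! contract the accumulated cartesian derivative integrals
               ! (hdab + hadb) back to the spherical Gaussian basis and add them
               ! to the corresponding block of matrix_vhxc_dbasis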
               DO i = 1, 3
                  hdab(i, :, :) = hdab(i, :, :) + hadb(i, :, :)
                  IF (iatom <= jatom) THEN
                     work(1:ncoa, 1:nsgfb(jset)) = MATMUL(hdab(i, 1:ncoa, 1:ncob), sphi_b(1:ncob, sgfb:sgfb + nsgfb(jset) - 1))
                     vhxc_block(i)%block(sgfa:sgfa + nsgfa(iset) - 1, sgfb:sgfb + nsgfb(jset) - 1) = &
                        vhxc_block(i)%block(sgfa:sgfa + nsgfa(iset) - 1, sgfb:sgfb + nsgfb(jset) - 1) + &
                        MATMUL(TRANSPOSE(sphi_a(1:ncoa, sgfa:sgfa + nsgfa(iset) - 1)), work(1:ncoa, 1:nsgfb(jset)))
                  ELSE
                     work(1:ncob, 1:nsgfa(iset)) = MATMUL(hdab(i, 1:ncob, 1:ncoa), sphi_a(1:ncoa, sgfa:sgfa + nsgfa(iset) - 1))
                     vhxc_block(i)%block(sgfb:sgfb + nsgfb(jset) - 1, sgfa:sgfa + nsgfa(iset) - 1) = &
                        vhxc_block(i)%block(sgfb:sgfb + nsgfb(jset) - 1, sgfa:sgfa + nsgfa(iset) - 1) + &
                        MATMUL(TRANSPOSE(sphi_b(1:ncob, sgfb:sgfb + nsgfb(jset) - 1)), work(1:ncob, 1:nsgfa(iset)))
                  END IF
               END DO
            END IF ! new_set_pair_coming

         END DO loop_tasks
         END DO loop_pairs
!$OMP END DO

         DO i = 1, 3
            CALL dbcsr_finalize(matrix_vhxc_dbasis(i)%matrix)
         END DO

      END DO loop_gridlevels

!$OMP END PARALLEL

      IF (distributed_grids) THEN
         ! Reconstruct H matrix if using distributed RS grids
         ! note send and recv direction reversed WRT collocate
         scatter = .FALSE.
         CALL rs_distribute_matrix(rs_descs, matrix_vhxc_dbasis, atom_pair_recv, atom_pair_send, &
                                   nimages, scatter=scatter)
      END IF

      IF (distributed_grids) THEN
         CALL dbcsr_deallocate_matrix_set(deltap)
      ELSE
         DO img = 1, nimages
            NULLIFY (deltap(img)%matrix)
         END DO
         DEALLOCATE (deltap)
      END IF

      DEALLOCATE (pabt, habt, workt, hdabt, hadbt)

      CALL timestop(handle)
   END SUBROUTINE integrate_v_dbasis

! **************************************************************************************************
!> \brief computes matrix elements corresponding to a given potential
!> \param v_rspace ...
!> \param hmat ...
!> \param hmat_kp ...
!> \param pmat ...
!> \param pmat_kp ...
!> \param qs_env ...
!> \param calculate_forces ...
!> \param force_adm optional scaling of force
!> \param compute_tau ...
!> \param gapw ...
!> \param basis_type ...
!> \param pw_env_external ...
!> \param task_list_external ...
!> \par History
!>      IAB (29-Apr-2010): Added OpenMP parallelisation to task loop
!>                         (c) The Numerical Algorithms Group (NAG) Ltd, 2010 on behalf of the HECToR project
!>      Some refactoring, get priorities for options correct (JGH, 04.2014)
!>      Added options to allow for k-points
!>      For a smooth transition we allow for old and new (vector) matrices (hmat, pmat) (JGH, 06.2015)
!> \note
!>      integrates a given potential (or any other object on a real-space
!>      grid), v_rspace, using a multigrid technique (mgrid_*)
!>      over the basis set, producing a number for every element of h
!>      (which should have the same sparsity structure as S)
!>      additional screening is available using the magnitude of the
!>      elements in p (? I'm not sure this is a very good idea);
!>      this argument is optional
!>      derivatives of these matrix elements with respect to the ionic
!>      coordinates can be computed as well
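!>
!>      A minimal calling sketch for the single-image case (a hedged
!>      illustration; matrix_ks and matrix_p stand for any suitably allocated
!>      dbcsr_p_type operator and density matrix):
!>        CALL integrate_v_rspace(v_rspace=v_hartree_rspace, hmat=matrix_ks, &
!>                                pmat=matrix_p, qs_env=qs_env, &
!>                                calculate_forces=.TRUE.)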
! **************************************************************************************************
   SUBROUTINE integrate_v_rspace(v_rspace, hmat, hmat_kp, pmat, pmat_kp, &
                                 qs_env, calculate_forces, force_adm, &
                                 compute_tau, gapw, basis_type, pw_env_external, task_list_external)

      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: v_rspace
      TYPE(dbcsr_p_type), INTENT(INOUT), OPTIONAL        :: hmat
      TYPE(dbcsr_p_type), DIMENSION(:), OPTIONAL, &
         POINTER                                         :: hmat_kp
      TYPE(dbcsr_p_type), INTENT(IN), OPTIONAL           :: pmat
      TYPE(dbcsr_p_type), DIMENSION(:), OPTIONAL, &
         POINTER                                         :: pmat_kp
      TYPE(qs_environment_type), POINTER                 :: qs_env
      LOGICAL, INTENT(IN)                                :: calculate_forces
      REAL(kind=dp), INTENT(IN), OPTIONAL                :: force_adm
      LOGICAL, INTENT(IN), OPTIONAL                      :: compute_tau, gapw
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: basis_type
      TYPE(pw_env_type), OPTIONAL, POINTER               :: pw_env_external
      TYPE(task_list_type), OPTIONAL, POINTER            :: task_list_external

      CHARACTER(len=*), PARAMETER :: routineN = 'integrate_v_rspace'

      CHARACTER(len=default_string_length)               :: my_basis_type
      INTEGER :: atom_a, handle, iatom, igrid_level, &
                 ikind, img, maxco, maxsgf_set, natoms, &
                 nimages, nkind
      INTEGER, ALLOCATABLE, DIMENSION(:)                 :: atom_of_kind, kind_of
      LOGICAL :: calculate_virial, distributed_grids, &
                 do_kp, my_compute_tau, my_gapw, &
                 pab_required
      REAL(kind=dp)                                      :: admm_scal_fac
      REAL(kind=dp), ALLOCATABLE, DIMENSION(:, :)        :: forces_array
      REAL(kind=dp), DIMENSION(3, 3)                     :: virial_matrix
      TYPE(atomic_kind_type), DIMENSION(:), POINTER      :: atomic_kind_set
      TYPE(cell_type), POINTER                           :: cell
      TYPE(dbcsr_p_type), DIMENSION(:), POINTER          :: deltap, dhmat
      TYPE(dft_control_type), POINTER                    :: dft_control
      TYPE(gridlevel_info_type), POINTER                 :: gridlevel_info
      TYPE(mp_comm_type)                                 :: group
      TYPE(particle_type), DIMENSION(:), POINTER         :: particle_set
      TYPE(pw_env_type), POINTER                         :: pw_env
      TYPE(qs_force_type), DIMENSION(:), POINTER         :: force
      TYPE(qs_kind_type), DIMENSION(:), POINTER          :: qs_kind_set
      TYPE(realspace_grid_type), DIMENSION(:), POINTER   :: rs_v
      TYPE(task_list_type), POINTER                      :: task_list, task_list_soft
      TYPE(virial_type), POINTER                         :: virial

      CALL timeset(routineN, handle)

      ! we test here if the provided operator matrices are consistent
      CPASSERT(PRESENT(hmat) .OR. PRESENT(hmat_kp))
      do_kp = .FALSE.
      IF (PRESENT(hmat_kp)) do_kp = .TRUE.
      IF (PRESENT(pmat)) THEN
         CPASSERT(PRESENT(hmat))
      ELSE IF (PRESENT(pmat_kp)) THEN
         CPASSERT(PRESENT(hmat_kp))
      END IF

      NULLIFY (pw_env)

      ! this routine works in two modes:
      ! normal mode : <a| V | b>
      ! tau mode    : <nabla a| V | nabla b>
      my_compute_tau = .FALSE.
      IF (PRESENT(compute_tau)) my_compute_tau = compute_tau

      ! this sets the basis set to be used. GAPW (== soft basis) overwrites basis_type
      ! default is "ORB"
      my_gapw = .FALSE.
      IF (PRESENT(gapw)) my_gapw = gapw
      IF (PRESENT(basis_type)) THEN
         my_basis_type = basis_type
      ELSE
         my_basis_type = "ORB"
      END IF

      ! get the task lists
      ! task lists have to be in sync with basis sets
      ! there is an option to provide the task list from outside (not through qs_env)
      ! the outside option has highest priority
      CALL get_qs_env(qs_env=qs_env, &
                      task_list=task_list, &
                      task_list_soft=task_list_soft)
      IF (.NOT. my_basis_type == "ORB") THEN
         CPASSERT(PRESENT(task_list_external))
      END IF
      IF (my_gapw) task_list => task_list_soft
      IF (PRESENT(task_list_external)) task_list => task_list_external
      CPASSERT(ASSOCIATED(task_list))

      ! the information on the grids is provided through pw_env
      ! pw_env has to be the parent env for the potential grid (input)
      ! there is an option to provide an external grid
      CALL get_qs_env(qs_env=qs_env, pw_env=pw_env)
      IF (PRESENT(pw_env_external)) pw_env => pw_env_external

      ! get all the general information on the system we are working on
      CALL get_qs_env(qs_env=qs_env, &
                      atomic_kind_set=atomic_kind_set, &
                      qs_kind_set=qs_kind_set, &
                      cell=cell, &
                      natom=natoms, &
                      dft_control=dft_control, &
                      particle_set=particle_set, &
                      force=force, &
                      virial=virial)

      admm_scal_fac = 1.0_dp
      IF (PRESENT(force_adm)) admm_scal_fac = force_adm

      CPASSERT(ASSOCIATED(pw_env))
      CALL pw_env_get(pw_env, rs_grids=rs_v)

      ! get mpi group from rs_v
      group = rs_v(1)%desc%group

      ! assign from pw_env
      gridlevel_info => pw_env%gridlevel_info

      ! transform the potential on the rs_multigrids
      CALL potential_pw2rs(rs_v, v_rspace, pw_env)

      nimages = dft_control%nimages
      IF (nimages > 1) THEN
         CPASSERT(do_kp)
      END IF
      nkind = SIZE(qs_kind_set)
      calculate_virial = virial%pv_availability .AND. (.NOT. virial%pv_numer) .AND. calculate_forces
      pab_required = (PRESENT(pmat) .OR. PRESENT(pmat_kp)) .AND. calculate_forces

      CALL get_qs_kind_set(qs_kind_set=qs_kind_set, &
                           maxco=maxco, &
                           maxsgf_set=maxsgf_set, &
                           basis_type=my_basis_type)

      distributed_grids = .FALSE.
      DO igrid_level = 1, gridlevel_info%ngrid_levels
         IF (rs_v(igrid_level)%desc%distributed) THEN
            distributed_grids = .TRUE.
         END IF
      END DO

      ALLOCATE (forces_array(3, natoms))

      IF (pab_required) THEN
         ! initialize the working pmat structures
         ALLOCATE (deltap(nimages))
         IF (do_kp) THEN
            DO img = 1, nimages
               deltap(img)%matrix => pmat_kp(img)%matrix
            END DO
         ELSE
            deltap(1)%matrix => pmat%matrix
         END IF

         ! Distribute matrix blocks.
         IF (distributed_grids) THEN
            CALL rs_scatter_matrices(deltap, task_list%pab_buffer, task_list, group)
         ELSE
            CALL rs_copy_to_buffer(deltap, task_list%pab_buffer, task_list)
         END IF
         DEALLOCATE (deltap)
      END IF

      ! Integrate all tasks from the grids
      CALL grid_integrate_task_list(task_list=task_list%grid_task_list, &
                                    compute_tau=my_compute_tau, &
                                    calculate_forces=calculate_forces, &
                                    calculate_virial=calculate_virial, &
                                    pab_blocks=task_list%pab_buffer, &
                                    rs_grids=rs_v, &
                                    hab_blocks=task_list%hab_buffer, &
                                    forces=forces_array, &
                                    virial=virial_matrix)

      IF (calculate_forces) THEN
         CALL get_atomic_kind_set(atomic_kind_set, atom_of_kind=atom_of_kind, kind_of=kind_of)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(atom_a, ikind) &
!$OMP             SHARED(natoms, force, forces_array, atom_of_kind, kind_of, admm_scal_fac)
         DO iatom = 1, natoms
            atom_a = atom_of_kind(iatom)
            ikind = kind_of(iatom)
            force(ikind)%rho_elec(:, atom_a) = force(ikind)%rho_elec(:, atom_a) + admm_scal_fac*forces_array(:, iatom)
         END DO
!$OMP END PARALLEL DO
         DEALLOCATE (atom_of_kind, kind_of)
      END IF

      IF (calculate_virial) THEN
         virial%pv_virial = virial%pv_virial + admm_scal_fac*virial_matrix
      END IF

      ! Gather all matrix images into a single array.
      ALLOCATE (dhmat(nimages))
      IF (PRESENT(hmat_kp)) THEN
         CPASSERT(.NOT. PRESENT(hmat))
         DO img = 1, nimages
            dhmat(img)%matrix => hmat_kp(img)%matrix
         END DO
      ELSE
         CPASSERT(PRESENT(hmat) .AND. nimages == 1)
         dhmat(1)%matrix => hmat%matrix
      END IF

      ! Distribute matrix blocks.
      IF (distributed_grids) THEN
         CALL rs_gather_matrices(task_list%hab_buffer, dhmat, task_list, group)
      ELSE
         CALL rs_copy_to_matrices(task_list%hab_buffer, dhmat, task_list)
      END IF
      DEALLOCATE (dhmat)

      CALL timestop(handle)

   END SUBROUTINE integrate_v_rspace

END MODULE qs_integrate_potential_product