(git:ec11232)
Loading...
Searching...
No Matches
pw_methods.F
Go to the documentation of this file.
1!--------------------------------------------------------------------------------------------------!
2! CP2K: A general program to perform molecular dynamics simulations !
3! Copyright 2000-2025 CP2K developers group <https://cp2k.org> !
4! !
5! SPDX-License-Identifier: GPL-2.0-or-later !
6!--------------------------------------------------------------------------------------------------!
7
8! **************************************************************************************************
9!> \note
10!> If parallel mode is distributed certain combination of
11!> "in_use" and "in_space" can not be used.
12!> For performance reasons it would be better to have the loops
13!> over g-vectors in the gather/scatter routines in new subprograms
14!> with the actual arrays (also the addressing) in the parameter list
15!> \par History
16!> JGH (29-Dec-2000) : Changes for parallel use
17!> JGH (13-Mar-2001) : added timing calls
18!> JGH (26-Feb-2003) : OpenMP enabled
19!> JGH (17-Nov-2007) : Removed mass arrays
20!> JGH (01-Dec-2007) : Removed and renamed routines
21!> JGH (04-Jul-2019) : added pw_multiply routine
22!> 03.2008 [tlaino] : Splitting pw_types into pw_types and pw_methods
23!> \author apsi
24! **************************************************************************************************
26
27
30 USE fft_tools, ONLY: bwfft, &
31 fwfft, &
32 fft3d
35 USE kinds, ONLY: dp
36 USE machine, ONLY: m_memory
37 USE mathconstants, ONLY: gaussi, &
38 z_zero
39 USE pw_copy_all, ONLY: pw_copy_match
40 USE pw_fpga, ONLY: pw_fpga_c1dr3d_3d_dp, &
45 USE pw_gpu, ONLY: pw_gpu_c1dr3d_3d, &
49 USE pw_grid_types, ONLY: halfspace, &
53 USE pw_types, ONLY: pw_r1d_rs_type
54 USE pw_types, ONLY: pw_r3d_rs_type
55 USE pw_types, ONLY: pw_c1d_rs_type
56 USE pw_types, ONLY: pw_c3d_rs_type
57 USE pw_types, ONLY: pw_r1d_gs_type
58 USE pw_types, ONLY: pw_r3d_gs_type
59 USE pw_types, ONLY: pw_c1d_gs_type
60 USE pw_types, ONLY: pw_c3d_gs_type
61#include "../base/base_uses.f90"
62
63 IMPLICIT NONE
64
65 PRIVATE
66
74 PUBLIC :: pw_set, pw_truncated
75 PUBLIC :: pw_scatter, pw_gather
77
78 CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'pw_methods'
79 LOGICAL, PARAMETER, PRIVATE :: debug_this_module = .false.
80 INTEGER, PARAMETER, PUBLIC :: do_accurate_sum = 0, &
82
83 INTERFACE pw_zero
84 MODULE PROCEDURE pw_zero_r1d_rs
85 MODULE PROCEDURE pw_zero_r3d_rs
86 MODULE PROCEDURE pw_zero_c1d_rs
87 MODULE PROCEDURE pw_zero_c3d_rs
88 MODULE PROCEDURE pw_zero_r1d_gs
89 MODULE PROCEDURE pw_zero_r3d_gs
90 MODULE PROCEDURE pw_zero_c1d_gs
91 MODULE PROCEDURE pw_zero_c3d_gs
92 END INTERFACE
93
94 INTERFACE pw_scale
95 MODULE PROCEDURE pw_scale_r1d_rs
96 MODULE PROCEDURE pw_scale_r3d_rs
97 MODULE PROCEDURE pw_scale_c1d_rs
98 MODULE PROCEDURE pw_scale_c3d_rs
99 MODULE PROCEDURE pw_scale_r1d_gs
100 MODULE PROCEDURE pw_scale_r3d_gs
101 MODULE PROCEDURE pw_scale_c1d_gs
102 MODULE PROCEDURE pw_scale_c3d_gs
103 END INTERFACE
104
105 INTERFACE pw_write
106 MODULE PROCEDURE pw_write_r1d_rs
107 MODULE PROCEDURE pw_write_r3d_rs
108 MODULE PROCEDURE pw_write_c1d_rs
109 MODULE PROCEDURE pw_write_c3d_rs
110 MODULE PROCEDURE pw_write_r1d_gs
111 MODULE PROCEDURE pw_write_r3d_gs
112 MODULE PROCEDURE pw_write_c1d_gs
113 MODULE PROCEDURE pw_write_c3d_gs
114 END INTERFACE
115
117 MODULE PROCEDURE pw_integrate_function_r1d_rs
118 MODULE PROCEDURE pw_integrate_function_r3d_rs
119 MODULE PROCEDURE pw_integrate_function_c1d_rs
120 MODULE PROCEDURE pw_integrate_function_c3d_rs
121 MODULE PROCEDURE pw_integrate_function_r1d_gs
122 MODULE PROCEDURE pw_integrate_function_r3d_gs
123 MODULE PROCEDURE pw_integrate_function_c1d_gs
124 MODULE PROCEDURE pw_integrate_function_c3d_gs
125 END INTERFACE
126
127 INTERFACE pw_set
128 MODULE PROCEDURE pw_set_value_r1d_rs
129 MODULE PROCEDURE pw_zero_r1d_rs
130 MODULE PROCEDURE pw_set_value_r3d_rs
131 MODULE PROCEDURE pw_zero_r3d_rs
132 MODULE PROCEDURE pw_set_value_c1d_rs
133 MODULE PROCEDURE pw_zero_c1d_rs
134 MODULE PROCEDURE pw_set_value_c3d_rs
135 MODULE PROCEDURE pw_zero_c3d_rs
136 MODULE PROCEDURE pw_set_value_r1d_gs
137 MODULE PROCEDURE pw_zero_r1d_gs
138 MODULE PROCEDURE pw_set_value_r3d_gs
139 MODULE PROCEDURE pw_zero_r3d_gs
140 MODULE PROCEDURE pw_set_value_c1d_gs
141 MODULE PROCEDURE pw_zero_c1d_gs
142 MODULE PROCEDURE pw_set_value_c3d_gs
143 MODULE PROCEDURE pw_zero_c3d_gs
144 END INTERFACE
145
146 INTERFACE pw_copy
147 MODULE PROCEDURE pw_copy_r1d_r1d_rs
148 MODULE PROCEDURE pw_copy_r1d_c1d_rs
149 MODULE PROCEDURE pw_copy_r3d_r3d_rs
150 MODULE PROCEDURE pw_copy_r3d_c3d_rs
151 MODULE PROCEDURE pw_copy_c1d_r1d_rs
152 MODULE PROCEDURE pw_copy_c1d_c1d_rs
153 MODULE PROCEDURE pw_copy_c3d_r3d_rs
154 MODULE PROCEDURE pw_copy_c3d_c3d_rs
155 MODULE PROCEDURE pw_copy_r1d_r1d_gs
156 MODULE PROCEDURE pw_copy_r1d_c1d_gs
157 MODULE PROCEDURE pw_copy_r3d_r3d_gs
158 MODULE PROCEDURE pw_copy_r3d_c3d_gs
159 MODULE PROCEDURE pw_copy_c1d_r1d_gs
160 MODULE PROCEDURE pw_copy_c1d_c1d_gs
161 MODULE PROCEDURE pw_copy_c3d_r3d_gs
162 MODULE PROCEDURE pw_copy_c3d_c3d_gs
163 END INTERFACE
164
165 INTERFACE pw_axpy
166 MODULE PROCEDURE pw_axpy_r1d_r1d_rs
167 MODULE PROCEDURE pw_axpy_r1d_c1d_rs
168 MODULE PROCEDURE pw_axpy_r3d_r3d_rs
169 MODULE PROCEDURE pw_axpy_r3d_c3d_rs
170 MODULE PROCEDURE pw_axpy_c1d_r1d_rs
171 MODULE PROCEDURE pw_axpy_c1d_c1d_rs
172 MODULE PROCEDURE pw_axpy_c3d_r3d_rs
173 MODULE PROCEDURE pw_axpy_c3d_c3d_rs
174 MODULE PROCEDURE pw_axpy_r1d_r1d_gs
175 MODULE PROCEDURE pw_axpy_r1d_c1d_gs
176 MODULE PROCEDURE pw_axpy_r3d_r3d_gs
177 MODULE PROCEDURE pw_axpy_r3d_c3d_gs
178 MODULE PROCEDURE pw_axpy_c1d_r1d_gs
179 MODULE PROCEDURE pw_axpy_c1d_c1d_gs
180 MODULE PROCEDURE pw_axpy_c3d_r3d_gs
181 MODULE PROCEDURE pw_axpy_c3d_c3d_gs
182 END INTERFACE
183
184 INTERFACE pw_multiply
185 MODULE PROCEDURE pw_multiply_r1d_r1d_rs
186 MODULE PROCEDURE pw_multiply_r1d_c1d_rs
187 MODULE PROCEDURE pw_multiply_r3d_r3d_rs
188 MODULE PROCEDURE pw_multiply_r3d_c3d_rs
189 MODULE PROCEDURE pw_multiply_c1d_r1d_rs
190 MODULE PROCEDURE pw_multiply_c1d_c1d_rs
191 MODULE PROCEDURE pw_multiply_c3d_r3d_rs
192 MODULE PROCEDURE pw_multiply_c3d_c3d_rs
193 MODULE PROCEDURE pw_multiply_r1d_r1d_gs
194 MODULE PROCEDURE pw_multiply_r1d_c1d_gs
195 MODULE PROCEDURE pw_multiply_r3d_r3d_gs
196 MODULE PROCEDURE pw_multiply_r3d_c3d_gs
197 MODULE PROCEDURE pw_multiply_c1d_r1d_gs
198 MODULE PROCEDURE pw_multiply_c1d_c1d_gs
199 MODULE PROCEDURE pw_multiply_c3d_r3d_gs
200 MODULE PROCEDURE pw_multiply_c3d_c3d_gs
201 END INTERFACE
202
204 MODULE PROCEDURE pw_multiply_with_r1d_r1d_rs
205 MODULE PROCEDURE pw_multiply_with_r1d_c1d_rs
206 MODULE PROCEDURE pw_multiply_with_r3d_r3d_rs
207 MODULE PROCEDURE pw_multiply_with_r3d_c3d_rs
208 MODULE PROCEDURE pw_multiply_with_c1d_r1d_rs
209 MODULE PROCEDURE pw_multiply_with_c1d_c1d_rs
210 MODULE PROCEDURE pw_multiply_with_c3d_r3d_rs
211 MODULE PROCEDURE pw_multiply_with_c3d_c3d_rs
212 MODULE PROCEDURE pw_multiply_with_r1d_r1d_gs
213 MODULE PROCEDURE pw_multiply_with_r1d_c1d_gs
214 MODULE PROCEDURE pw_multiply_with_r3d_r3d_gs
215 MODULE PROCEDURE pw_multiply_with_r3d_c3d_gs
216 MODULE PROCEDURE pw_multiply_with_c1d_r1d_gs
217 MODULE PROCEDURE pw_multiply_with_c1d_c1d_gs
218 MODULE PROCEDURE pw_multiply_with_c3d_r3d_gs
219 MODULE PROCEDURE pw_multiply_with_c3d_c3d_gs
220 END INTERFACE
221
223 MODULE PROCEDURE pw_integral_ab_r1d_r1d_rs
224 MODULE PROCEDURE pw_integral_ab_r1d_c1d_rs
225 MODULE PROCEDURE pw_integral_ab_r3d_r3d_rs
226 MODULE PROCEDURE pw_integral_ab_r3d_c3d_rs
227 MODULE PROCEDURE pw_integral_ab_c1d_r1d_rs
228 MODULE PROCEDURE pw_integral_ab_c1d_c1d_rs
229 MODULE PROCEDURE pw_integral_ab_c3d_r3d_rs
230 MODULE PROCEDURE pw_integral_ab_c3d_c3d_rs
231 MODULE PROCEDURE pw_integral_ab_r1d_r1d_gs
232 MODULE PROCEDURE pw_integral_ab_r1d_c1d_gs
233 MODULE PROCEDURE pw_integral_ab_r3d_r3d_gs
234 MODULE PROCEDURE pw_integral_ab_r3d_c3d_gs
235 MODULE PROCEDURE pw_integral_ab_c1d_r1d_gs
236 MODULE PROCEDURE pw_integral_ab_c1d_c1d_gs
237 MODULE PROCEDURE pw_integral_ab_c3d_r3d_gs
238 MODULE PROCEDURE pw_integral_ab_c3d_c3d_gs
239 END INTERFACE
240
242 MODULE PROCEDURE pw_integral_a2b_r1d_r1d
243 MODULE PROCEDURE pw_integral_a2b_r1d_c1d
244 MODULE PROCEDURE pw_integral_a2b_c1d_r1d
245 MODULE PROCEDURE pw_integral_a2b_c1d_c1d
246 END INTERFACE
247
248 INTERFACE pw_gather
249 MODULE PROCEDURE pw_gather_p_r1d
250 MODULE PROCEDURE pw_gather_p_c1d
251 MODULE PROCEDURE pw_gather_s_r1d_r3d
252 MODULE PROCEDURE pw_gather_s_r1d_c3d
253 MODULE PROCEDURE pw_gather_s_c1d_r3d
254 MODULE PROCEDURE pw_gather_s_c1d_c3d
255 END INTERFACE
256
257 INTERFACE pw_scatter
258 MODULE PROCEDURE pw_scatter_p_r1d
259 MODULE PROCEDURE pw_scatter_p_c1d
260 MODULE PROCEDURE pw_scatter_s_r1d_r3d
261 MODULE PROCEDURE pw_scatter_s_r1d_c3d
262 MODULE PROCEDURE pw_scatter_s_c1d_r3d
263 MODULE PROCEDURE pw_scatter_s_c1d_c3d
264 END INTERFACE
265
267 MODULE PROCEDURE pw_copy_to_array_r1d_r1d_rs
268 MODULE PROCEDURE pw_copy_to_array_r1d_c1d_rs
269 MODULE PROCEDURE pw_copy_to_array_r3d_r3d_rs
270 MODULE PROCEDURE pw_copy_to_array_r3d_c3d_rs
271 MODULE PROCEDURE pw_copy_to_array_c1d_r1d_rs
272 MODULE PROCEDURE pw_copy_to_array_c1d_c1d_rs
273 MODULE PROCEDURE pw_copy_to_array_c3d_r3d_rs
274 MODULE PROCEDURE pw_copy_to_array_c3d_c3d_rs
275 MODULE PROCEDURE pw_copy_to_array_r1d_r1d_gs
276 MODULE PROCEDURE pw_copy_to_array_r1d_c1d_gs
277 MODULE PROCEDURE pw_copy_to_array_r3d_r3d_gs
278 MODULE PROCEDURE pw_copy_to_array_r3d_c3d_gs
279 MODULE PROCEDURE pw_copy_to_array_c1d_r1d_gs
280 MODULE PROCEDURE pw_copy_to_array_c1d_c1d_gs
281 MODULE PROCEDURE pw_copy_to_array_c3d_r3d_gs
282 MODULE PROCEDURE pw_copy_to_array_c3d_c3d_gs
283 END INTERFACE
284
286 MODULE PROCEDURE pw_copy_from_array_r1d_r1d_rs
287 MODULE PROCEDURE pw_copy_from_array_r1d_c1d_rs
288 MODULE PROCEDURE pw_copy_from_array_r3d_r3d_rs
289 MODULE PROCEDURE pw_copy_from_array_r3d_c3d_rs
290 MODULE PROCEDURE pw_copy_from_array_c1d_r1d_rs
291 MODULE PROCEDURE pw_copy_from_array_c1d_c1d_rs
292 MODULE PROCEDURE pw_copy_from_array_c3d_r3d_rs
293 MODULE PROCEDURE pw_copy_from_array_c3d_c3d_rs
294 MODULE PROCEDURE pw_copy_from_array_r1d_r1d_gs
295 MODULE PROCEDURE pw_copy_from_array_r1d_c1d_gs
296 MODULE PROCEDURE pw_copy_from_array_r3d_r3d_gs
297 MODULE PROCEDURE pw_copy_from_array_r3d_c3d_gs
298 MODULE PROCEDURE pw_copy_from_array_c1d_r1d_gs
299 MODULE PROCEDURE pw_copy_from_array_c1d_c1d_gs
300 MODULE PROCEDURE pw_copy_from_array_c3d_r3d_gs
301 MODULE PROCEDURE pw_copy_from_array_c3d_c3d_gs
302 END INTERFACE
303
304 INTERFACE pw_transfer
305 MODULE PROCEDURE pw_copy_r1d_r1d_rs
306 MODULE PROCEDURE pw_copy_r1d_r1d_gs
307 MODULE PROCEDURE pw_gather_s_r1d_r3d_2
308 MODULE PROCEDURE pw_scatter_s_r1d_r3d_2
309 MODULE PROCEDURE pw_copy_r1d_c1d_rs
310 MODULE PROCEDURE pw_copy_r1d_c1d_gs
311 MODULE PROCEDURE pw_gather_s_r1d_c3d_2
312 MODULE PROCEDURE pw_scatter_s_r1d_c3d_2
313 MODULE PROCEDURE pw_copy_r3d_r3d_rs
314 MODULE PROCEDURE pw_copy_r3d_r3d_gs
315 MODULE PROCEDURE fft_wrap_pw1pw2_r3d_c1d_rs_gs
316 MODULE PROCEDURE pw_copy_r3d_c3d_rs
317 MODULE PROCEDURE pw_copy_r3d_c3d_gs
318 MODULE PROCEDURE fft_wrap_pw1pw2_r3d_c3d_rs_gs
319 MODULE PROCEDURE pw_copy_c1d_r1d_rs
320 MODULE PROCEDURE pw_copy_c1d_r1d_gs
321 MODULE PROCEDURE pw_gather_s_c1d_r3d_2
322 MODULE PROCEDURE pw_scatter_s_c1d_r3d_2
323 MODULE PROCEDURE fft_wrap_pw1pw2_c1d_r3d_gs_rs
324 MODULE PROCEDURE pw_copy_c1d_c1d_rs
325 MODULE PROCEDURE pw_copy_c1d_c1d_gs
326 MODULE PROCEDURE pw_gather_s_c1d_c3d_2
327 MODULE PROCEDURE pw_scatter_s_c1d_c3d_2
328 MODULE PROCEDURE fft_wrap_pw1pw2_c1d_c3d_gs_rs
329 MODULE PROCEDURE pw_copy_c3d_r3d_rs
330 MODULE PROCEDURE pw_copy_c3d_r3d_gs
331 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_r3d_gs_rs
332 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c1d_rs_gs
333 MODULE PROCEDURE pw_copy_c3d_c3d_rs
334 MODULE PROCEDURE pw_copy_c3d_c3d_gs
335 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c3d_rs_gs
336 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c3d_gs_rs
337 END INTERFACE
338
339CONTAINS
! **************************************************************************************************
!> \brief Set all coefficients of a (real, 1d, real-space) pw field to zero
!> \param pw the pw field whose data array is zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r1d_rs (pw)

      TYPE(pw_r1d_rs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      ! NOTE(review): the guard appears to work around an OMP WORKSHARE issue in
      ! Intel LLVM compilers older than 2025 -- confirm against build logs
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_r1d_rs
368
! **************************************************************************************************
!> \brief Multiplies all pw coefficients with a constant factor
!> \param pw the pw field scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r1d_rs (pw, a)

      TYPE(pw_r1d_rs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_scale'

      INTEGER                                            :: handle, idx

      CALL timeset(routineN, handle)

      ! scale every coefficient, distributing the elements over threads
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(idx) SHARED(a, pw)
      DO idx = LBOUND(pw%array, 1), UBOUND(pw%array, 1)
         pw%array(idx) = a*pw%array(idx)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_scale_r1d_rs
394
! **************************************************************************************************
!> \brief Writes a short description of the pw field (type tag and array bounds)
!>        to the given unit; I/O errors are deliberately ignored (best-effort output)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_r1d_rs (pw, unit_nr)

      TYPE(pw_r1d_rs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: istat

      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=istat)
      WRITE (unit=unit_nr, fmt="(a)", iostat=istat) " in_use=r1d"

      ! report the bounds of the data array, or a marker when it is not associated
      IF (.NOT. ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,')>')") &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1)
      END IF

   END SUBROUTINE pw_write_r1d_rs
423
! **************************************************************************************************
!> \brief Integrates a real-space pw function over the grid: dvol * sum of all
!>        (optionally absolute) coefficients, reduced over all processes
!> \param fun the function to integrate
!> \param isign if present and negative, the sign of the integral is flipped
!> \param oprt optional operator name; only "ABS"/"abs" is recognized
!> \return the value of the integral
! **************************************************************************************************
   FUNCTION pw_integrate_function_r1d_rs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      LOGICAL                                            :: want_abs

      want_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            want_abs = .TRUE.
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      ! reduce with maximum accuracy, then scale by the volume element
      IF (want_abs) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(ABS(fun%array))
      ELSE
         total_fun = fun%pw_grid%dvol*accurate_sum(fun%array)
      END IF

      ! for distributed grids, sum the per-process partial integrals
      IF (fun%pw_grid%para%mode /= pw_mode_local) &
         CALL fun%pw_grid%para%group%sum(total_fun)

      ! a negative isign flips the sign (equivalent to multiplying by sign(1, isign))
      IF (PRESENT(isign)) THEN
         IF (isign < 0) total_fun = -total_fun
      END IF

   END FUNCTION pw_integrate_function_r1d_rs
469
! **************************************************************************************************
!> \brief Sets all coefficients of a pw field to the given constant
!> \param pw the pw field whose data array is overwritten (INTENT(IN): only the
!>        pointed-to data changes, not the descriptor itself)
!> \param value the constant assigned to every coefficient
! **************************************************************************************************
   SUBROUTINE pw_set_value_r1d_rs (pw, value)
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_set_value'

      INTEGER                                            :: handle, idx

      CALL timeset(routineN, handle)

      ! constant fill, distributed over threads
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(idx) SHARED(pw, value)
      DO idx = LBOUND(pw%array, 1), UBOUND(pw%array, 1)
         pw%array(idx) = value
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_r1d_rs
! **************************************************************************************************
!> \brief Set all coefficients of a (real, 1d, g-space) pw field to zero
!> \param pw the pw field whose data array is zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r1d_gs (pw)

      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      ! NOTE(review): the guard appears to work around an OMP WORKSHARE issue in
      ! Intel LLVM compilers older than 2025 -- confirm against build logs
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_r1d_gs
520
! **************************************************************************************************
!> \brief Multiplies all pw coefficients with a constant factor
!> \param pw the pw field scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r1d_gs (pw, a)

      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_scale'

      INTEGER                                            :: handle, idx

      CALL timeset(routineN, handle)

      ! scale every coefficient, distributing the elements over threads
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(idx) SHARED(a, pw)
      DO idx = LBOUND(pw%array, 1), UBOUND(pw%array, 1)
         pw%array(idx) = a*pw%array(idx)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_scale_r1d_gs
546
! **************************************************************************************************
!> \brief Writes a short description of the pw field (type tag and array bounds)
!>        to the given unit; I/O errors are deliberately ignored (best-effort output)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_r1d_gs (pw, unit_nr)

      TYPE(pw_r1d_gs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: istat

      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=istat)
      WRITE (unit=unit_nr, fmt="(a)", iostat=istat) " in_use=r1d"

      ! report the bounds of the data array, or a marker when it is not associated
      IF (.NOT. ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,')>')") &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1)
      END IF

   END SUBROUTINE pw_write_r1d_gs
575
! **************************************************************************************************
!> \brief Integrates a g-space pw function over the cell: in reciprocal space
!>        the integral equals the g=0 coefficient times the cell volume
!> \param fun the function to integrate
!> \param isign if present and negative, the sign of the integral is flipped
!> \param oprt optional operator name; "ABS"/"abs" is recognized but not
!>        implemented for g-space data and aborts
!> \return the value of the integral
! **************************************************************************************************
   FUNCTION pw_integrate_function_r1d_gs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      LOGICAL                                            :: want_abs

      want_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            want_abs = .TRUE.
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      IF (want_abs) THEN
         cpabort("Operator ABS not implemented")
      END IF

      ! only the process that holds the g=0 component contributes
      total_fun = 0.0_dp
      IF (fun%pw_grid%have_g0) total_fun = fun%pw_grid%vol*fun%array(1)

      ! for distributed grids, broadcast/accumulate the g=0 contribution
      IF (fun%pw_grid%para%mode /= pw_mode_local) &
         CALL fun%pw_grid%para%group%sum(total_fun)

      ! a negative isign flips the sign (equivalent to multiplying by sign(1, isign))
      IF (PRESENT(isign)) THEN
         IF (isign < 0) total_fun = -total_fun
      END IF

   END FUNCTION pw_integrate_function_r1d_gs
618
! **************************************************************************************************
!> \brief Sets all coefficients of a pw field to the given constant
!> \param pw the pw field whose data array is overwritten (INTENT(IN): only the
!>        pointed-to data changes, not the descriptor itself)
!> \param value the constant assigned to every coefficient
! **************************************************************************************************
   SUBROUTINE pw_set_value_r1d_gs (pw, value)
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_set_value'

      INTEGER                                            :: handle, idx

      CALL timeset(routineN, handle)

      ! constant fill, distributed over threads
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(idx) SHARED(pw, value)
      DO idx = LBOUND(pw%array, 1), UBOUND(pw%array, 1)
         pw%array(idx) = value
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_r1d_gs
641
! **************************************************************************************************
!> \brief Gathers the g-vector coefficients of a distributed grid from the
!>        FFT-layout work array c into the linear coefficient array pw%array,
!>        keeping only the real part (r1d data)
!> \param pw the pw field receiving the gathered coefficients
!> \param c FFT work array, indexed as (x position, combined yz column index)
!> \param scale optional factor applied to each gathered coefficient
! **************************************************************************************************
   SUBROUTINE pw_gather_p_r1d (pw, c, scale)

      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: scale

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_gather_p'

      INTEGER                                            :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      ! this variant handles only the distributed-grid layout
      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! mapl/mapm/mapn translate the integer g-vector components (g_hat) into
      ! 0-based grid positions; yzq maps an (m, n) pair onto the combined yz
      ! column index of the FFT work array c
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq)

         ! two separate loops so the unscaled path avoids a multiply per element
         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               ! only the real part is kept for r1d data
               pw%array(gpt) = scale* real(c(l, mn), kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = real(c(l, mn), kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_p_r1d
698
! **************************************************************************************************
!> \brief Scatters the linear coefficient array pw%array of a distributed grid
!>        into the FFT-layout work array c (inverse of pw_gather_p_r1d); real
!>        coefficients are stored with a zero imaginary part
!> \param pw the pw field providing the coefficients
!> \param c FFT work array, indexed as (x position, combined yz column index);
!>        cleared to zero first unless scale is given
!> \param scale optional factor applied to each scattered coefficient.
!>        NOTE(review): when scale is present, c is NOT cleared first -- callers
!>        appear to rely on a pre-initialized c in that case; confirm
! **************************************************************************************************
   SUBROUTINE pw_scatter_p_r1d (pw, c, scale)
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: scale

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_scatter_p'

      INTEGER                                            :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      ! this variant handles only the distributed-grid layout
      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! mapl/mapm/mapn (pos) translate g-vector components into 0-based grid
      ! positions; yzq maps an (m, n) pair onto the combined yz column of c
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq, ngpts => SIZE(pw%pw_grid%gsq))

         ! without a scale factor the whole target array is zeroed first
         IF (.NOT. PRESENT(scale)) c = z_zero

         ! two separate loops so the unscaled path avoids a multiply per element
         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = cmplx(scale*pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = cmplx(pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! for half-space grids, also fill the -g positions (the neg index maps);
      ! the same real value is written since the data is real
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, mapl => pw%pw_grid%mapl%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq), yzq => pw%pw_grid%para%yzq)

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = scale*( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = ( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            END IF
         END associate
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_p_r1d
! **************************************************************************************************
!> \brief Set all coefficients of a (real, 3d, real-space) pw field to zero
!> \param pw the pw field whose data array is zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r3d_rs (pw)

      TYPE(pw_r3d_rs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      ! NOTE(review): the guard appears to work around an OMP WORKSHARE issue in
      ! Intel LLVM compilers older than 2025 -- confirm against build logs
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_r3d_rs
817
! **************************************************************************************************
!> \brief Multiplies all pw coefficients with a constant factor
!> \param pw the pw field scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r3d_rs (pw, a)

      TYPE(pw_r3d_rs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_scale'

      INTEGER                                            :: handle, i, j, k

      CALL timeset(routineN, handle)

      ! scale every grid point; innermost loop runs over the first (fastest) index
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(i, j, k) SHARED(a, pw)
      DO k = LBOUND(pw%array, 3), UBOUND(pw%array, 3)
         DO j = LBOUND(pw%array, 2), UBOUND(pw%array, 2)
            DO i = LBOUND(pw%array, 1), UBOUND(pw%array, 1)
               pw%array(i, j, k) = a*pw%array(i, j, k)
            END DO
         END DO
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_scale_r3d_rs
843
! **************************************************************************************************
!> \brief Writes a short description of the pw field (type tag and array bounds)
!>        to the given unit; I/O errors are deliberately ignored (best-effort output)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_r3d_rs (pw, unit_nr)

      TYPE(pw_r3d_rs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: istat

      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=istat)
      WRITE (unit=unit_nr, fmt="(a)", iostat=istat) " in_use=r3d"

      ! report the bounds of the data array, or a marker when it is not associated
      IF (.NOT. ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1), LBOUND(pw%array, 2), UBOUND(pw%array, 2), &
            LBOUND(pw%array, 3), UBOUND(pw%array, 3)
      END IF

   END SUBROUTINE pw_write_r3d_rs
873
! **************************************************************************************************
!> \brief Integrates a real-space pw function over the grid: dvol * sum of all
!>        (optionally absolute) coefficients, reduced over all processes
!> \param fun the function to integrate
!> \param isign if present and negative, the sign of the integral is flipped
!> \param oprt optional operator name; only "ABS"/"abs" is recognized
!> \return the value of the integral
! **************************************************************************************************
   FUNCTION pw_integrate_function_r3d_rs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      LOGICAL                                            :: want_abs

      want_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            want_abs = .TRUE.
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      ! reduce with maximum accuracy, then scale by the volume element
      IF (want_abs) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(ABS(fun%array))
      ELSE
         total_fun = fun%pw_grid%dvol*accurate_sum(fun%array)
      END IF

      ! for distributed grids, sum the per-process partial integrals
      IF (fun%pw_grid%para%mode /= pw_mode_local) &
         CALL fun%pw_grid%para%group%sum(total_fun)

      ! a negative isign flips the sign (equivalent to multiplying by sign(1, isign))
      IF (PRESENT(isign)) THEN
         IF (isign < 0) total_fun = -total_fun
      END IF

   END FUNCTION pw_integrate_function_r3d_rs
919
! **************************************************************************************************
!> \brief Sets all coefficients of a pw field to the given constant
!> \param pw the pw field whose data array is overwritten (INTENT(IN): only the
!>        pointed-to data changes, not the descriptor itself)
!> \param value the constant assigned to every coefficient
! **************************************************************************************************
   SUBROUTINE pw_set_value_r3d_rs (pw, value)
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_set_value'

      INTEGER                                            :: handle, i, j, k

      CALL timeset(routineN, handle)

      ! constant fill; innermost loop runs over the first (fastest) index
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(i, j, k) SHARED(pw, value)
      DO k = LBOUND(pw%array, 3), UBOUND(pw%array, 3)
         DO j = LBOUND(pw%array, 2), UBOUND(pw%array, 2)
            DO i = LBOUND(pw%array, 1), UBOUND(pw%array, 1)
               pw%array(i, j, k) = value
            END DO
         END DO
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_r3d_rs
! **************************************************************************************************
!> \brief Set all coefficients of a (real, 3d, g-space) pw field to zero
!> \param pw the pw field whose data array is zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r3d_gs (pw)

      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      ! NOTE(review): the guard appears to work around an OMP WORKSHARE issue in
      ! Intel LLVM compilers older than 2025 -- confirm against build logs
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_r3d_gs
970
! **************************************************************************************************
!> \brief Multiplies all pw coefficients with a constant factor
!> \param pw the pw field scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r3d_gs (pw, a)

      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_scale'

      INTEGER                                            :: handle, i, j, k

      CALL timeset(routineN, handle)

      ! scale every grid point; innermost loop runs over the first (fastest) index
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(i, j, k) SHARED(a, pw)
      DO k = LBOUND(pw%array, 3), UBOUND(pw%array, 3)
         DO j = LBOUND(pw%array, 2), UBOUND(pw%array, 2)
            DO i = LBOUND(pw%array, 1), UBOUND(pw%array, 1)
               pw%array(i, j, k) = a*pw%array(i, j, k)
            END DO
         END DO
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_scale_r3d_gs
996
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_r3d_gs (pw, unit_nr)

      TYPE(pw_r3d_gs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: idim, iostatus

      ! write errors are deliberately ignored (iostatus is never inspected)
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r3d"

      IF (ASSOCIATED(pw%array)) THEN
         ! emit lower:upper bounds per dimension via an implied-do
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
            (LBOUND(pw%array, idim), UBOUND(pw%array, idim), idim=1, 3)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_r3d_gs
1026
! **************************************************************************************************
!> \brief Integrate a function on a reciprocal-space 3d grid.
!>        NOTE: not implemented for this grid type -- the routine always reaches
!>        the unconditional CPABORT below, so everything after it is dead code
!>        kept only for symmetry with the other template instances.
!> \param fun the function values on the grid
!> \param isign if present, the result would be multiplied by the sign of isign
!> \param oprt optional operator name; only "ABS"/"abs" is recognized (also unimplemented)
!> \return total_fun the integral (never actually computed here)
! **************************************************************************************************
   FUNCTION pw_integrate_function_r3d_gs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      INTEGER                                            :: iop

      iop = 0

      ! validate the operator keyword before aborting on the unimplemented paths
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! unconditional abort: this combination (3d data in reciprocal space) is unsupported
      cpabort("Reciprocal space integration for 3D grids not implemented!")

      ! --- unreachable from here on ---
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_r3d_gs
1069
! **************************************************************************************************
!> \brief Assign a constant value to every point of the grid.
!> \param pw grid data whose array is overwritten in place
!> \param value constant assigned to all grid points
! **************************************************************************************************
   SUBROUTINE pw_set_value_r3d_gs (pw, value)
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! thread-parallel whole-array assignment
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
      pw%array(:, :, :) = value
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_r3d_gs
1092
! **************************************************************************************************
!> \brief Set all values of a pw type to zero.
!> \param pw grid data to be zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_c1d_rs (pw)

      TYPE(pw_c1d_rs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! serial fallback for Intel LLVM compilers older than 2025 (see version guard)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array(:) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
#else
      pw%array(:) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_c1d_rs
1121
! **************************************************************************************************
!> \brief Multiplies all pw coefficients by a real scalar.
!> \param pw grid data scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_c1d_rs (pw, a)

      TYPE(pw_c1d_rs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! commuted form of a*pw%array; bit-identical result
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_scale_c1d_rs
1147
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_c1d_rs (pw, unit_nr)

      TYPE(pw_c1d_rs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: iostatus

      ! write errors are deliberately ignored (iostatus is never inspected)
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c1d"

      IF (ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,')>')") &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c1d_rs
1176
! **************************************************************************************************
!> \brief Integrate a real-space function given by its (complex) grid values:
!>        dvol * sum of Re(f) (or of |f| when oprt="ABS"), reduced over all ranks
!>        for distributed grids.
!> \param fun the grid data holding the function values
!> \param isign if present, the result is multiplied by the sign of isign
!> \param oprt optional operator name; only "ABS"/"abs" is recognized
!> \return total_fun the integral of the function over the cell
! **************************************************************************************************
   FUNCTION pw_integrate_function_c1d_rs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_c1d_rs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      LOGICAL                                            :: use_abs

      use_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            use_abs = .TRUE.
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      ! do reduction using maximum accuracy
      IF (use_abs) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(ABS(fun%array))
      ELSE
         total_fun = fun%pw_grid%dvol*accurate_sum(REAL(fun%array, KIND=dp))
      END IF

      ! distributed grids: sum the partial integrals over all ranks
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*SIGN(1._dp, REAL(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c1d_rs
1222
! **************************************************************************************************
!> \brief Assign a constant real value to every coefficient (imaginary part zero).
!> \param pw grid data whose array is overwritten in place
!> \param value real constant assigned to all grid points
! **************************************************************************************************
   SUBROUTINE pw_set_value_c1d_rs (pw, value)
      TYPE(pw_c1d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! thread-parallel whole-array assignment
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
      pw%array(:) = CMPLX(value, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_c1d_rs
! **************************************************************************************************
!> \brief Set all values of a pw type to zero.
!> \param pw grid data to be zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_c1d_gs (pw)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! serial fallback for Intel LLVM compilers older than 2025 (see version guard)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array(:) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
#else
      pw%array(:) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_c1d_gs
1273
! **************************************************************************************************
!> \brief Multiplies all pw coefficients by a real scalar.
!> \param pw grid data scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_c1d_gs (pw, a)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! commuted form of a*pw%array; bit-identical result
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_scale_c1d_gs
1299
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_c1d_gs (pw, unit_nr)

      TYPE(pw_c1d_gs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: iostatus

      ! write errors are deliberately ignored (iostatus is never inspected)
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c1d"

      IF (ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,')>')") &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c1d_gs
1328
! **************************************************************************************************
!> \brief Integrate a function given by its plane-wave coefficients.
!>        In reciprocal space the integral reduces to vol * Re(f(G=0)); only the
!>        rank that owns the G=0 coefficient contributes before the reduction.
!> \param fun the pw coefficients of the function
!> \param isign if present, the result is multiplied by the sign of isign
!> \param oprt optional operator name; "ABS"/"abs" is parsed but aborts (not implemented here)
!> \return total_fun the integral of the function over the cell
! **************************************************************************************************
   FUNCTION pw_integrate_function_c1d_gs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      INTEGER                                            :: iop

      iop = 0

      ! validate the operator keyword
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! assumes the G=0 coefficient sits at index 1 when have_g0 is set -- TODO confirm
      IF (fun%pw_grid%have_g0) total_fun = fun%pw_grid%vol* real(fun%array(1), kind=dp)

      ! distributed grids: propagate the single contributing value to all ranks
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c1d_gs
1371
! **************************************************************************************************
!> \brief Assign a constant real value to every coefficient (imaginary part zero).
!> \param pw grid data whose array is overwritten in place
!> \param value real constant assigned to all coefficients
! **************************************************************************************************
   SUBROUTINE pw_set_value_c1d_gs (pw, value)
      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! thread-parallel whole-array assignment
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
      pw%array(:) = CMPLX(value, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_c1d_gs
1394
! **************************************************************************************************
!> \brief Gather the coefficients of a distributed grid from the FFT-layout
!>        scratch array c (first index x, second index a packed yz pencil) into
!>        the 1d coefficient array pw%array.
!> \param pw the distributed pw data; pw%array(gpt) receives the coefficients
!> \param c FFT-layout source data
!> \param scale if present, every gathered coefficient is multiplied by scale
! **************************************************************************************************
   SUBROUTINE pw_gather_p_c1d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: scale

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gather_p'

      INTEGER                                            :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      ! this routine addresses the para%yzq pencil layout, which only exists for
      ! distributed grids
      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! mapl/mapm/mapn translate the (possibly negative) g_hat integer components
      ! into 0-based grid coordinates; yzq packs (m,n) into the pencil index of c
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq)

         ! two separate loops so the PRESENT(scale) test stays out of the hot loop
         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = scale* c(l, mn)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = c(l, mn)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_p_c1d
1451
! **************************************************************************************************
!> \brief Scatter the coefficients of a distributed grid from pw%array into the
!>        FFT-layout scratch array c (first index x, second index a packed yz
!>        pencil). For halfspace grids the -G half of c is additionally filled
!>        with the complex conjugates, restoring the full sphere for the FFT.
!> \param pw the distributed pw data providing the coefficients
!> \param c FFT-layout destination data
!> \param scale if present, coefficients are multiplied by scale; note that c is
!>        zeroed beforehand only when scale is absent -- with scale the caller is
!>        presumably expected to have prepared c (TODO confirm against callers)
! **************************************************************************************************
   SUBROUTINE pw_scatter_p_c1d (pw, c, scale)
      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: scale

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scatter_p'

      INTEGER                                            :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      ! this routine addresses the para%yzq pencil layout, which only exists for
      ! distributed grids
      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! +G half: pos maps translate g_hat components to 0-based grid coordinates,
      ! yzq packs (m,n) into the pencil index of c
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq, ngpts => SIZE(pw%pw_grid%gsq))

         ! clear target only in the unscaled case (see \param scale above)
         IF (.NOT. PRESENT(scale)) c = z_zero

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = scale*pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! halfspace grids store only +G explicitly; reconstruct the -G half using
      ! the neg maps and conjugated coefficients
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, mapl => pw%pw_grid%mapl%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq), yzq => pw%pw_grid%para%yzq)

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = scale*conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            END IF
         END associate
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_p_c1d
! **************************************************************************************************
!> \brief Set all values of a pw type to zero.
!> \param pw grid data to be zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_c3d_rs (pw)

      TYPE(pw_c3d_rs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! serial fallback for Intel LLVM compilers older than 2025 (see version guard)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array(:, :, :) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
#else
      pw%array(:, :, :) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_c3d_rs
1570
! **************************************************************************************************
!> \brief Multiplies all pw coefficients by a real scalar.
!> \param pw grid data scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_c3d_rs (pw, a)

      TYPE(pw_c3d_rs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! commuted form of a*pw%array; bit-identical result
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_scale_c3d_rs
1596
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_c3d_rs (pw, unit_nr)

      TYPE(pw_c3d_rs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: idim, iostatus

      ! write errors are deliberately ignored (iostatus is never inspected)
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c3d"

      IF (ASSOCIATED(pw%array)) THEN
         ! emit lower:upper bounds per dimension via an implied-do
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
            (LBOUND(pw%array, idim), UBOUND(pw%array, idim), idim=1, 3)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c3d_rs
1626
! **************************************************************************************************
!> \brief Integrate a real-space function given by its (complex) grid values:
!>        dvol * sum of Re(f) (or of |f| when oprt="ABS"), reduced over all ranks
!>        for distributed grids.
!> \param fun the grid data holding the function values
!> \param isign if present, the result is multiplied by the sign of isign
!> \param oprt optional operator name; only "ABS"/"abs" is recognized
!> \return total_fun the integral of the function over the cell
! **************************************************************************************************
   FUNCTION pw_integrate_function_c3d_rs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_c3d_rs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      LOGICAL                                            :: use_abs

      use_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            use_abs = .TRUE.
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      ! do reduction using maximum accuracy
      IF (use_abs) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(ABS(fun%array))
      ELSE
         total_fun = fun%pw_grid%dvol*accurate_sum(REAL(fun%array, KIND=dp))
      END IF

      ! distributed grids: sum the partial integrals over all ranks
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*SIGN(1._dp, REAL(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c3d_rs
1672
! **************************************************************************************************
!> \brief Assign a constant real value to every grid point (imaginary part zero).
!> \param pw grid data whose array is overwritten in place
!> \param value real constant assigned to all grid points
! **************************************************************************************************
   SUBROUTINE pw_set_value_c3d_rs (pw, value)
      TYPE(pw_c3d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! thread-parallel whole-array assignment
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
      pw%array(:, :, :) = CMPLX(value, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_c3d_rs
! **************************************************************************************************
!> \brief Set all values of a pw type to zero.
!> \param pw grid data to be zeroed in place
!> \par History
!>      none
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_c3d_gs (pw)

      TYPE(pw_c3d_gs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! serial fallback for Intel LLVM compilers older than 2025 (see version guard)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array(:, :, :) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
#else
      pw%array(:, :, :) = CMPLX(0.0_dp, 0.0_dp, KIND=dp)
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_zero_c3d_gs
1723
! **************************************************************************************************
!> \brief Multiplies all pw coefficients by a real scalar.
!> \param pw grid data scaled in place
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_c3d_gs (pw, a)

      TYPE(pw_c3d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! commuted form of a*pw%array; bit-identical result
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_scale_c3d_gs
1749
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_c3d_gs (pw, unit_nr)

      TYPE(pw_c3d_gs_type), INTENT(in)                   :: pw
      INTEGER, INTENT(in)                                :: unit_nr

      INTEGER                                            :: idim, iostatus

      ! write errors are deliberately ignored (iostatus is never inspected)
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c3d"

      IF (ASSOCIATED(pw%array)) THEN
         ! emit lower:upper bounds per dimension via an implied-do
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
            (LBOUND(pw%array, idim), UBOUND(pw%array, idim), idim=1, 3)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c3d_gs
1779
! **************************************************************************************************
!> \brief Integrate a function on a 3d reciprocal-space grid.
!>        NOTE: not implemented for this grid type -- the routine always reaches
!>        the unconditional CPABORT below, so everything after it is dead code
!>        kept only for symmetry with the other template instances.
!> \param fun the function values on the grid
!> \param isign if present, the result would be multiplied by the sign of isign
!> \param oprt optional operator name; only "ABS"/"abs" is recognized (also unimplemented)
!> \return total_fun the integral (never actually computed here)
! **************************************************************************************************
   FUNCTION pw_integrate_function_c3d_gs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_c3d_gs_type), INTENT(IN)                   :: fun
      INTEGER, INTENT(IN), OPTIONAL                      :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL             :: oprt
      REAL(kind=dp)                                      :: total_fun

      INTEGER                                            :: iop

      iop = 0

      ! validate the operator keyword before aborting on the unimplemented paths
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! unconditional abort: this combination (3d data in reciprocal space) is unsupported
      cpabort("Reciprocal space integration for 3D grids not implemented!")

      ! --- unreachable from here on ---
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c3d_gs
1822
! **************************************************************************************************
!> \brief Assign a constant real value to every coefficient (imaginary part zero).
!> \param pw grid data whose array is overwritten in place
!> \param value real constant assigned to all coefficients
! **************************************************************************************************
   SUBROUTINE pw_set_value_c3d_gs (pw, value)
      TYPE(pw_c3d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! thread-parallel whole-array assignment
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
      pw%array(:, :, :) = CMPLX(value, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_set_value_c3d_gs
1845
1846
! **************************************************************************************************
!> \brief copy a pw type variable
!> \param pw1 source grid data
!> \param pw2 destination grid data (overwritten)
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors.
!>      Real-space instance: spherical grids are rejected up front, so only the
!>      identical-grid and reference-grid paths remain (the spherical-copy branch
!>      of the template was unreachable here and has been removed, together with
!>      the unused locals ng and ns).
! **************************************************************************************************
   SUBROUTINE pw_copy_r1d_r1d_rs (pw1, pw2)

      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT)                :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER                                            :: handle
      INTEGER                                            :: i, j, ng1, ng2

      CALL timeset(routineN, handle)

      ! real-space data: spherical grids are not allowed at all
      IF (pw1%pw_grid%spherical .NEQV. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! different grid objects: one grid must reference the other;
         ! gidx then maps the points of the smaller grid into the reference grid
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)

         IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            IF (ng1 >= ng2) THEN
               ! pw2 is the smaller grid: gather its points from pw1
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(i,j) &
!$OMP SHARED(ng2, pw1, pw2)
               DO i = 1, ng2
                  j = pw2%pw_grid%gidx(i)
                  pw2%array(i) = pw1%array(j)
               END DO
!$OMP END PARALLEL DO
            ELSE
               ! pw2 is the larger grid: zero it, then scatter pw1 into it
               CALL pw_zero(pw2)
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(i,j) &
!$OMP SHARED(ng1, pw1, pw2)
               DO i = 1, ng1
                  j = pw2%pw_grid%gidx(i)
                  pw2%array(j) = pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! mirror case: pw1's grid references pw2's grid
            IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
               DO i = 1, ng2
                  j = pw1%pw_grid%gidx(i)
                  pw2%array(i) = pw1%array(j)
               END DO
!$OMP END PARALLEL DO
            ELSE
               CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
               DO i = 1, ng1
                  j = pw1%pw_grid%gidx(i)
                  pw2%array(j) = pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE
            cpabort("Copy not implemented!")
         END IF

      ELSE
         ! same grid object: plain array copy
         ! (spurious PRIVATE(i) removed -- i is not referenced in this WORKSHARE)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r1d_r1d_rs
1952
! **************************************************************************************************
!> \brief Copy the grid data of a pw into a bare 1d array.
!> \param pw source grid data
!> \param array destination array (same shape as pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r1d_r1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT)         :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! thread-parallel element-wise copy
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array(:) = pw%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r1d_r1d_rs
1974
! **************************************************************************************************
!> \brief Fill the grid data of a pw from a bare 1d array.
!> \param pw destination grid data (its array is overwritten)
!> \param array source array (same shape as pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_r1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN)            :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! thread-parallel element-wise copy
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array(:) = array(:)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_r1d_rs
1996
1997! **************************************************************************************************
1998!> \brief pw2 = alpha*pw1 + beta*pw2
1999!> alpha defaults to 1, beta defaults to 1
2000!> \param pw1 ...
2001!> \param pw2 ...
2002!> \param alpha ...
2003!> \param beta ...
2004!> \param allow_noncompatible_grids ...
2005!> \par History
2006!> JGH (21-Feb-2003) : added reference grid functionality
2007!> JGH (01-Dec-2007) : rename and remove complex alpha
2008!> \author apsi
2009!> \note
2010!> Currently only summing up of respective types allowed,
2011!> in order to avoid errors
2012! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_r1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! optional scaling factors default to alpha = beta = 1
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! step 1: scale pw2 by beta (beta == 0 zeroes the array explicitly)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      ! step 2: add alpha*pw1
      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! same grid object: straight element-wise update
         ! (alpha == 1 skips the multiply, alpha == 0 is a no-op)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff grids share the coefficient ordering:
            ! add over the overlapping range only
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2:
            ! pw2's gidx maps its local index onto the reference grid
            IF (ng1 >= ng2) THEN
               ! gather from the larger reference grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! scatter into the larger grid; entries not covered by pw1 stay unchanged
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! mirror case: pw2 lives on the reference grid of pw1, so pw1's gidx is used
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! neither grid is the other's reference grid: no index mapping available
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_r1d_rs
2161
2162! **************************************************************************************************
2163!> \brief pw_out = pw_out + alpha * pw1 * pw2
2164!> alpha defaults to 1
2165!> \param pw_out ...
2166!> \param pw1 ...
2167!> \param pw2 ...
2168!> \param alpha ...
2169!> \author JGH
2170! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_r1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      ! scaling factor defaults to 1
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! all three fields must share the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! guard: the serial fallback below exists for Intel LLVM compilers older than
      ! 2025.0 -- presumably an OMP WORKSHARE issue with those versions; keep the
      ! two branches functionally in sync (TODO confirm the exact compiler defect)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* pw2%array
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_r1d_rs
2212
! **************************************************************************************************
!> \brief In-place element-wise product: pw1 = pw1 * pw2.
!> \param pw1 field that is scaled in place
!> \param pw2 multiplier field; must live on the same grid object as pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r1d_r1d_rs (pw1, pw2)
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

      ! Hadamard product, parallelized over the grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw1, pw2)
      DO ig = 1, SIZE(pw1%array)
         pw1%array(ig) = pw1%array(ig)*pw2%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r1d_r1d_rs
2238
2239! **************************************************************************************************
2240!> \brief Calculate integral over unit cell for functions in plane wave basis
2241!> only returns the real part of it ......
2242!> \param pw1 ...
2243!> \param pw2 ...
2244!> \param sumtype ...
2245!> \param just_sum ...
2246!> \param local_only ...
2247!> \return ...
2248!> \par History
2249!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
2250!> \author apsi
2251! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_r1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_r1d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! accurate (compensated) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      ! local_only suppresses the MPI reduction at the end
      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! just_sum skips the volume-element scaling (returns the raw dot product)
      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = dot_product(pw1%array, pw2%array)

      ELSE

         ! Do accurate sum
         integral_value = accurate_dot_product(pw1%array, pw2%array)

      END IF

      ! convert the raw sum to an integral over the unit cell
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! half-space grids store only half the coefficients: double the sum, then
      ! remove the double counting of the element stored once (index 1 when have_g0)
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*pw2%array(1)
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_r1d_rs
2310! **************************************************************************************************
2311!> \brief copy a pw type variable
2312!> \param pw1 ...
2313!> \param pw2 ...
2314!> \par History
2315!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
2316!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
2317!> JGH (21-Feb-2003) : Code for generalized reference grids
2318!> \author apsi
2319!> \note
2320!> Currently only copying of respective types allowed,
2321!> in order to avoid errors
2322! **************************************************************************************************
2323 SUBROUTINE pw_copy_r1d_r1d_gs (pw1, pw2)
2324
2325 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
2326 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
2327
2328 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
2329
2330 INTEGER :: handle
2331 INTEGER :: i, j, ng, ng1, ng2, ns
2332
2333 CALL timeset(routinen, handle)
2334
2335 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
2336 cpabort("Both grids must be either spherical or non-spherical!")
2337
2338 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
2339 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
2340 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
2341 ng1 = SIZE(pw1%array)
2342 ng2 = SIZE(pw2%array)
2343 ng = min(ng1, ng2)
2344!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
2345 pw2%array(1:ng) = pw1%array(1:ng)
2346!$OMP END PARALLEL WORKSHARE
2347 IF (ng2 > ng) THEN
2348!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
2349 pw2%array(ng + 1:ng2) = 0.0_dp
2350!$OMP END PARALLEL WORKSHARE
2351 END IF
2352 ELSE
2353 cpabort("Copies between spherical grids require compatible grids!")
2354 END IF
2355 ELSE
2356 ng1 = SIZE(pw1%array)
2357 ng2 = SIZE(pw2%array)
2358 ns = 2*max(ng1, ng2)
2359
2360 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
2361 IF (ng1 >= ng2) THEN
2362!$OMP PARALLEL DO DEFAULT(NONE) &
2363!$OMP PRIVATE(i,j) &
2364!$OMP SHARED(ng2, pw1, pw2)
2365 DO i = 1, ng2
2366 j = pw2%pw_grid%gidx(i)
2367 pw2%array(i) = pw1%array(j)
2368 END DO
2369!$OMP END PARALLEL DO
2370 ELSE
2371 CALL pw_zero(pw2)
2372!$OMP PARALLEL DO DEFAULT(NONE) &
2373!$OMP PRIVATE(i,j) &
2374!$OMP SHARED(ng1, pw1, pw2)
2375 DO i = 1, ng1
2376 j = pw2%pw_grid%gidx(i)
2377 pw2%array(j) = pw1%array(i)
2378 END DO
2379!$OMP END PARALLEL DO
2380 END IF
2381 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
2382 IF (ng1 >= ng2) THEN
2383!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
2384 DO i = 1, ng2
2385 j = pw1%pw_grid%gidx(i)
2386 pw2%array(i) = pw1%array(j)
2387 END DO
2388!$OMP END PARALLEL DO
2389 ELSE
2390 CALL pw_zero(pw2)
2391!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
2392 DO i = 1, ng1
2393 j = pw1%pw_grid%gidx(i)
2394 pw2%array(j) = pw1%array(i)
2395 END DO
2396!$OMP END PARALLEL DO
2397 END IF
2398 ELSE
2399 cpabort("Copy not implemented!")
2400 END IF
2401
2402 END IF
2403
2404 ELSE
2405!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
2406 pw2%array = pw1%array
2407!$OMP END PARALLEL WORKSHARE
2408 END IF
2409
2410 CALL timestop(handle)
2411
2412 END SUBROUTINE pw_copy_r1d_r1d_gs
2413
! **************************************************************************************************
!> \brief Copy the data of a g-space pw field into a plain real array.
!> \param pw    source pw field
!> \param array destination buffer; must be shape-compatible with pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r1d_r1d_gs (pw, array)
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! element-wise copy, parallelized over the grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw, array)
      DO ig = 1, SIZE(pw%array)
         array(ig) = pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r1d_r1d_gs
2435
! **************************************************************************************************
!> \brief Fill a g-space pw field from a plain real array.
!> \param pw    destination pw field (data component is overwritten)
!> \param array source values; must be shape-compatible with pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_r1d_gs (pw, array)
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! element-wise copy, parallelized over the grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw, array)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_r1d_gs
2457
2458! **************************************************************************************************
2459!> \brief pw2 = alpha*pw1 + beta*pw2
2460!> alpha defaults to 1, beta defaults to 1
2461!> \param pw1 ...
2462!> \param pw2 ...
2463!> \param alpha ...
2464!> \param beta ...
2465!> \param allow_noncompatible_grids ...
2466!> \par History
2467!> JGH (21-Feb-2003) : added reference grid functionality
2468!> JGH (01-Dec-2007) : rename and remove complex alpha
2469!> \author apsi
2470!> \note
2471!> Currently only summing up of respective types allowed,
2472!> in order to avoid errors
2473! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_r1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! optional scaling factors default to alpha = beta = 1
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! step 1: scale pw2 by beta (beta == 0 zeroes the array explicitly)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      ! step 2: add alpha*pw1
      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! same grid object: straight element-wise update
         ! (alpha == 1 skips the multiply, alpha == 0 is a no-op)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff grids share the coefficient ordering:
            ! add over the overlapping range only
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2:
            ! pw2's gidx maps its local index onto the reference grid
            IF (ng1 >= ng2) THEN
               ! gather from the larger reference grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! scatter into the larger grid; entries not covered by pw1 stay unchanged
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! mirror case: pw2 lives on the reference grid of pw1, so pw1's gidx is used
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! neither grid is the other's reference grid: no index mapping available
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_r1d_gs
2622
2623! **************************************************************************************************
2624!> \brief pw_out = pw_out + alpha * pw1 * pw2
2625!> alpha defaults to 1
2626!> \param pw_out ...
2627!> \param pw1 ...
2628!> \param pw2 ...
2629!> \param alpha ...
2630!> \author JGH
2631! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_r1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      ! scaling factor defaults to 1
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! all three fields must share the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! guard: the serial fallback below exists for Intel LLVM compilers older than
      ! 2025.0 -- presumably an OMP WORKSHARE issue with those versions; keep the
      ! two branches functionally in sync (TODO confirm the exact compiler defect)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* pw2%array
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_r1d_gs
2673
! **************************************************************************************************
!> \brief In-place element-wise product: pw1 = pw1 * pw2.
!> \param pw1 field that is scaled in place
!> \param pw2 multiplier field; must live on the same grid object as pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r1d_r1d_gs (pw1, pw2)
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

      ! Hadamard product, parallelized over the grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw1, pw2)
      DO ig = 1, SIZE(pw1%array)
         pw1%array(ig) = pw1%array(ig)*pw2%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r1d_r1d_gs
2699
2700! **************************************************************************************************
2701!> \brief Calculate integral over unit cell for functions in plane wave basis
2702!> only returns the real part of it ......
2703!> \param pw1 ...
2704!> \param pw2 ...
2705!> \param sumtype ...
2706!> \param just_sum ...
2707!> \param local_only ...
2708!> \return ...
2709!> \par History
2710!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
2711!> \author apsi
2712! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_r1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_r1d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! accurate (compensated) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      ! local_only suppresses the MPI reduction at the end
      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! just_sum skips the volume scaling (returns the raw dot product)
      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = dot_product(pw1%array, pw2%array)

      ELSE

         ! Do accurate sum
         integral_value = accurate_dot_product(pw1%array, pw2%array)

      END IF

      ! scale by the cell volume (note: the real-space variant uses dvol instead,
      ! matching the respective Parseval convention of this module)
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! half-space grids store only half the coefficients: double the sum, then
      ! remove the double counting of the element stored once (index 1 when have_g0)
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*pw2%array(1)
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_r1d_gs
2771
2772! **************************************************************************************************
2773!> \brief ...
2774!> \param pw1 ...
2775!> \param pw2 ...
2776!> \return ...
2777! **************************************************************************************************
   FUNCTION pw_integral_a2b_r1d_r1d (pw1, pw2) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! sum of pw1*pw2 weighted by gsq (the squared G-vector magnitudes of the grid),
      ! using compensated summation
      integral_value = accurate_sum(pw1%array*pw2%array*pw1%pw_grid%gsq)
      ! half-space grids store only half the coefficients: double the sum
      ! (no g0 correction needed here since the g0 term has gsq = 0)
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
      END IF

      ! convert the raw sum to an integral (g-space convention: cell volume)
      integral_value = integral_value*pw1%pw_grid%vol

      ! reduce over all ranks holding parts of a distributed grid
      IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)
      CALL timestop(handle)

   END FUNCTION pw_integral_a2b_r1d_r1d
2806! **************************************************************************************************
2807!> \brief copy a pw type variable
2808!> \param pw1 ...
2809!> \param pw2 ...
2810!> \par History
2811!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
2812!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
2813!> JGH (21-Feb-2003) : Code for generalized reference grids
2814!> \author apsi
2815!> \note
2816!> Currently only copying of respective types allowed,
2817!> in order to avoid errors
2818! **************************************************************************************************
2819 SUBROUTINE pw_copy_r1d_c1d_rs (pw1, pw2)
2820
2821 TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
2822 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
2823
2824 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
2825
2826 INTEGER :: handle
2827 INTEGER :: i, j, ng, ng1, ng2, ns
2828
2829 CALL timeset(routinen, handle)
2830
2831 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
2832 cpabort("Both grids must be either spherical or non-spherical!")
2833 IF (pw1%pw_grid%spherical) &
2834 cpabort("Spherical grids only exist in reciprocal space!")
2835
2836 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
2837 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
2838 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
2839 ng1 = SIZE(pw1%array)
2840 ng2 = SIZE(pw2%array)
2841 ng = min(ng1, ng2)
2842!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
2843 pw2%array(1:ng) = cmplx(pw1%array(1:ng), 0.0_dp, kind=dp)
2844!$OMP END PARALLEL WORKSHARE
2845 IF (ng2 > ng) THEN
2846!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
2847 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
2848!$OMP END PARALLEL WORKSHARE
2849 END IF
2850 ELSE
2851 cpabort("Copies between spherical grids require compatible grids!")
2852 END IF
2853 ELSE
2854 ng1 = SIZE(pw1%array)
2855 ng2 = SIZE(pw2%array)
2856 ns = 2*max(ng1, ng2)
2857
2858 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
2859 IF (ng1 >= ng2) THEN
2860!$OMP PARALLEL DO DEFAULT(NONE) &
2861!$OMP PRIVATE(i,j) &
2862!$OMP SHARED(ng2, pw1, pw2)
2863 DO i = 1, ng2
2864 j = pw2%pw_grid%gidx(i)
2865 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
2866 END DO
2867!$OMP END PARALLEL DO
2868 ELSE
2869 CALL pw_zero(pw2)
2870!$OMP PARALLEL DO DEFAULT(NONE) &
2871!$OMP PRIVATE(i,j) &
2872!$OMP SHARED(ng1, pw1, pw2)
2873 DO i = 1, ng1
2874 j = pw2%pw_grid%gidx(i)
2875 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
2876 END DO
2877!$OMP END PARALLEL DO
2878 END IF
2879 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
2880 IF (ng1 >= ng2) THEN
2881!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
2882 DO i = 1, ng2
2883 j = pw1%pw_grid%gidx(i)
2884 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
2885 END DO
2886!$OMP END PARALLEL DO
2887 ELSE
2888 CALL pw_zero(pw2)
2889!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
2890 DO i = 1, ng1
2891 j = pw1%pw_grid%gidx(i)
2892 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
2893 END DO
2894!$OMP END PARALLEL DO
2895 END IF
2896 ELSE
2897 cpabort("Copy not implemented!")
2898 END IF
2899
2900 END IF
2901
2902 ELSE
2903!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
2904 pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
2905!$OMP END PARALLEL WORKSHARE
2906 END IF
2907
2908 CALL timestop(handle)
2909
2910 END SUBROUTINE pw_copy_r1d_c1d_rs
2911
! **************************************************************************************************
!> \brief Copy a real pw field into a complex array (imaginary parts set to zero).
!> \param pw    source real-valued pw field
!> \param array destination complex buffer; must be shape-compatible with pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r1d_c1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! promote each real value to a complex one with zero imaginary part
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw, array)
      DO ig = 1, SIZE(pw%array)
         array(ig) = cmplx(pw%array(ig), 0.0_dp, kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r1d_c1d_rs
2933
! **************************************************************************************************
!> \brief Fill a real pw field from a complex array (imaginary parts are discarded).
!> \param pw    destination real-valued pw field (data component is overwritten)
!> \param array source complex values; must be shape-compatible with pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_c1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! keep only the real part of each complex value
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw, array)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = real(array(ig), kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_c1d_rs
2955
2956! **************************************************************************************************
2957!> \brief pw2 = alpha*pw1 + beta*pw2
2958!> alpha defaults to 1, beta defaults to 1
2959!> \param pw1 ...
2960!> \param pw2 ...
2961!> \param alpha ...
2962!> \param beta ...
2963!> \param allow_noncompatible_grids ...
2964!> \par History
2965!> JGH (21-Feb-2003) : added reference grid functionality
2966!> JGH (01-Dec-2007) : rename and remove complex alpha
2967!> \author apsi
2968!> \note
2969!> Currently only summing up of respective types allowed,
2970!> in order to avoid errors
2971! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_c1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT)                :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL                :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL                      :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER                                            :: handle
      LOGICAL                                            :: my_allow_noncompatible_grids
      REAL(KIND=dp)                                      :: my_alpha, my_beta
      INTEGER                                            :: i, j, ng, ng1, ng2

      CALL timeset(routineN, handle)

      ! Resolve optionals: defaults are pw2 <- 1*pw1 + 1*pw2 with strict grid checking.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Apply the beta scaling of pw2 up front; beta == 0 zeroes pw2 explicitly
      ! instead of multiplying by zero.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Same grid object: plain element-wise axpy; the real data of pw1 is
         ! promoted to complex with zero imaginary part. alpha == 0 adds nothing,
         ! so pw2 (already beta-scaled above) is left untouched.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids: the leading min(ng1, ng2) coefficients
            ! of the two arrays correspond one-to-one.
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: pw2's gidx maps pw2's
            ! local index onto pw1's index space (gather when pw1 is larger,
            ! scatter when pw2 is larger).
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw2 lives on the reference grid of pw1, so pw1's
            ! gidx provides the index mapping.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_c1d_rs
3120
3121! **************************************************************************************************
3122!> \brief pw_out = pw_out + alpha * pw1 * pw2
3123!> alpha defaults to 1
3124!> \param pw_out ...
3125!> \param pw1 ...
3126!> \param pw2 ...
3127!> \param alpha ...
3128!> \author JGH
3129! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_c1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_rs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER                                            :: handle
      REAL(KIND=dp)                                      :: my_alpha

      CALL timeset(routineN, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! All three fields must share the same grid object (element-wise product).
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! Only the real part of pw2 enters the product.
      ! NOTE(review): the OpenMP WORKSHARE path is disabled for Intel LLVM
      ! compilers older than 2025 — presumably a compiler workaround; confirm
      ! before removing the preprocessor guard.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      ! Serial fallback for the affected compilers.
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_c1d_rs
3171
3172! **************************************************************************************************
3173!> \brief ...
3174!> \param pw1 ...
3175!> \param pw2 ...
3176! **************************************************************************************************
3177 SUBROUTINE pw_multiply_with_r1d_c1d_rs (pw1, pw2)
3178 TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw1
3179 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
3180
3181 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
3182
3183 INTEGER :: handle
3184
3185 CALL timeset(routinen, handle)
3186
3187 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
3188 cpabort("Incompatible grids!")
3189
3190!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3191 pw1%array = pw1%array* real(pw2%array, kind=dp)
3192!$OMP END PARALLEL WORKSHARE
3193
3194 CALL timestop(handle)
3195
3196 END SUBROUTINE pw_multiply_with_r1d_c1d_rs
3197
3198! **************************************************************************************************
3199!> \brief Calculate integral over unit cell for functions in plane wave basis
3200!> only returns the real part of it ......
3201!> \param pw1 ...
3202!> \param pw2 ...
3203!> \param sumtype ...
3204!> \param just_sum ...
3205!> \param local_only ...
3206!> \return ...
3207!> \par History
3208!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
3209!> \author apsi
3210! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_c1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_c1d_rs'

      INTEGER                                            :: handle, loc_sumtype
      LOGICAL                                            :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Default to compensated (accurate) summation unless the caller asks otherwise.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      ! local_only: skip the final reduction over the distributed grid.
      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! just_sum: return the bare sum without the volume element.
      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      IF (loc_sumtype == do_standard_sum) THEN

         ! Plain intrinsic SUM; only the real part of pw2 contributes.
         integral_value = sum(pw1%array*real(pw2%array, kind=dp))

      ELSE

         ! Compensated summation for better accuracy on large arrays.
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp))

      END IF

      ! Turn the sum into an integral over the cell via the volume element.
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! Half-space grids store one member of each +/-G pair: double the sum and
      ! take out the double-counted G=0 term when this rank holds it.
      ! NOTE(review): the G=0 correction is applied without the dvol factor
      ! even when just_sum is .false. — confirm this path is only exercised
      ! where that is intended.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*real(pw2%array(1), kind=dp)
      END IF

      ! Accumulate the partial sums of all ranks of a distributed grid.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_c1d_rs
3269! **************************************************************************************************
3270!> \brief copy a pw type variable
3271!> \param pw1 ...
3272!> \param pw2 ...
3273!> \par History
3274!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
3275!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
3276!> JGH (21-Feb-2003) : Code for generalized reference grids
3277!> \author apsi
3278!> \note
3279!> Currently only copying of respective types allowed,
3280!> in order to avoid errors
3281! **************************************************************************************************
3282 SUBROUTINE pw_copy_r1d_c1d_gs (pw1, pw2)
3283
3284 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
3285 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
3286
3287 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
3288
3289 INTEGER :: handle
3290 INTEGER :: i, j, ng, ng1, ng2, ns
3291
3292 CALL timeset(routinen, handle)
3293
3294 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
3295 cpabort("Both grids must be either spherical or non-spherical!")
3296
3297 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
3298 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
3299 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
3300 ng1 = SIZE(pw1%array)
3301 ng2 = SIZE(pw2%array)
3302 ng = min(ng1, ng2)
3303!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
3304 pw2%array(1:ng) = cmplx(pw1%array(1:ng), 0.0_dp, kind=dp)
3305!$OMP END PARALLEL WORKSHARE
3306 IF (ng2 > ng) THEN
3307!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
3308 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
3309!$OMP END PARALLEL WORKSHARE
3310 END IF
3311 ELSE
3312 cpabort("Copies between spherical grids require compatible grids!")
3313 END IF
3314 ELSE
3315 ng1 = SIZE(pw1%array)
3316 ng2 = SIZE(pw2%array)
3317 ns = 2*max(ng1, ng2)
3318
3319 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
3320 IF (ng1 >= ng2) THEN
3321!$OMP PARALLEL DO DEFAULT(NONE) &
3322!$OMP PRIVATE(i,j) &
3323!$OMP SHARED(ng2, pw1, pw2)
3324 DO i = 1, ng2
3325 j = pw2%pw_grid%gidx(i)
3326 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
3327 END DO
3328!$OMP END PARALLEL DO
3329 ELSE
3330 CALL pw_zero(pw2)
3331!$OMP PARALLEL DO DEFAULT(NONE) &
3332!$OMP PRIVATE(i,j) &
3333!$OMP SHARED(ng1, pw1, pw2)
3334 DO i = 1, ng1
3335 j = pw2%pw_grid%gidx(i)
3336 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
3337 END DO
3338!$OMP END PARALLEL DO
3339 END IF
3340 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
3341 IF (ng1 >= ng2) THEN
3342!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
3343 DO i = 1, ng2
3344 j = pw1%pw_grid%gidx(i)
3345 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
3346 END DO
3347!$OMP END PARALLEL DO
3348 ELSE
3349 CALL pw_zero(pw2)
3350!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
3351 DO i = 1, ng1
3352 j = pw1%pw_grid%gidx(i)
3353 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
3354 END DO
3355!$OMP END PARALLEL DO
3356 END IF
3357 ELSE
3358 cpabort("Copy not implemented!")
3359 END IF
3360
3361 END IF
3362
3363 ELSE
3364!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
3365 pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
3366!$OMP END PARALLEL WORKSHARE
3367 END IF
3368
3369 CALL timestop(handle)
3370
3371 END SUBROUTINE pw_copy_r1d_c1d_gs
3372
3373! **************************************************************************************************
3374!> \brief ...
3375!> \param pw ...
3376!> \param array ...
3377! **************************************************************************************************
3378 SUBROUTINE pw_copy_to_array_r1d_c1d_gs (pw, array)
3379 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
3380 COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
3381
3382 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
3383
3384 INTEGER :: handle
3385
3386 CALL timeset(routinen, handle)
3387
3388!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3389 array(:) = cmplx(pw%array(:), 0.0_dp, kind=dp)
3390!$OMP END PARALLEL WORKSHARE
3391
3392 CALL timestop(handle)
3393 END SUBROUTINE pw_copy_to_array_r1d_c1d_gs
3394
3395! **************************************************************************************************
3396!> \brief ...
3397!> \param pw ...
3398!> \param array ...
3399! **************************************************************************************************
3400 SUBROUTINE pw_copy_from_array_r1d_c1d_gs (pw, array)
3401 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
3402 COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array
3403
3404 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
3405
3406 INTEGER :: handle
3407
3408 CALL timeset(routinen, handle)
3409
3410!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3411 pw%array = real(array, kind=dp)
3412!$OMP END PARALLEL WORKSHARE
3413
3414 CALL timestop(handle)
3415 END SUBROUTINE pw_copy_from_array_r1d_c1d_gs
3416
3417! **************************************************************************************************
3418!> \brief pw2 = alpha*pw1 + beta*pw2
3419!> alpha defaults to 1, beta defaults to 1
3420!> \param pw1 ...
3421!> \param pw2 ...
3422!> \param alpha ...
3423!> \param beta ...
3424!> \param allow_noncompatible_grids ...
3425!> \par History
3426!> JGH (21-Feb-2003) : added reference grid functionality
3427!> JGH (01-Dec-2007) : rename and remove complex alpha
3428!> \author apsi
3429!> \note
3430!> Currently only summing up of respective types allowed,
3431!> in order to avoid errors
3432! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_c1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL                :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL                      :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER                                            :: handle
      LOGICAL                                            :: my_allow_noncompatible_grids
      REAL(KIND=dp)                                      :: my_alpha, my_beta
      INTEGER                                            :: i, j, ng, ng1, ng2

      CALL timeset(routineN, handle)

      ! Resolve optionals: defaults are pw2 <- 1*pw1 + 1*pw2 with strict grid checking.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Apply the beta scaling of pw2 up front; beta == 0 zeroes pw2 explicitly
      ! instead of multiplying by zero.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Same grid object: plain element-wise axpy; the real data of pw1 is
         ! promoted to complex with zero imaginary part. alpha == 0 adds nothing,
         ! so pw2 (already beta-scaled above) is left untouched.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids: the leading min(ng1, ng2) coefficients
            ! of the two arrays correspond one-to-one.
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: pw2's gidx maps pw2's
            ! local index onto pw1's index space (gather when pw1 is larger,
            ! scatter when pw2 is larger).
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw2 lives on the reference grid of pw1, so pw1's
            ! gidx provides the index mapping.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_c1d_gs
3581
3582! **************************************************************************************************
3583!> \brief pw_out = pw_out + alpha * pw1 * pw2
3584!> alpha defaults to 1
3585!> \param pw_out ...
3586!> \param pw1 ...
3587!> \param pw2 ...
3588!> \param alpha ...
3589!> \author JGH
3590! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_c1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER                                            :: handle
      REAL(KIND=dp)                                      :: my_alpha

      CALL timeset(routineN, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! All three fields must share the same grid object (element-wise product).
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! Only the real part of pw2 enters the product.
      ! NOTE(review): the OpenMP WORKSHARE path is disabled for Intel LLVM
      ! compilers older than 2025 — presumably a compiler workaround; confirm
      ! before removing the preprocessor guard.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      ! Serial fallback for the affected compilers.
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_c1d_gs
3632
3633! **************************************************************************************************
3634!> \brief ...
3635!> \param pw1 ...
3636!> \param pw2 ...
3637! **************************************************************************************************
3638 SUBROUTINE pw_multiply_with_r1d_c1d_gs (pw1, pw2)
3639 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw1
3640 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
3641
3642 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
3643
3644 INTEGER :: handle
3645
3646 CALL timeset(routinen, handle)
3647
3648 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
3649 cpabort("Incompatible grids!")
3650
3651!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3652 pw1%array = pw1%array* real(pw2%array, kind=dp)
3653!$OMP END PARALLEL WORKSHARE
3654
3655 CALL timestop(handle)
3656
3657 END SUBROUTINE pw_multiply_with_r1d_c1d_gs
3658
3659! **************************************************************************************************
3660!> \brief Calculate integral over unit cell for functions in plane wave basis
3661!> only returns the real part of it ......
3662!> \param pw1 ...
3663!> \param pw2 ...
3664!> \param sumtype ...
3665!> \param just_sum ...
3666!> \param local_only ...
3667!> \return ...
3668!> \par History
3669!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
3670!> \author apsi
3671! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_c1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_c1d_gs'

      INTEGER                                            :: handle, loc_sumtype
      LOGICAL                                            :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Default to compensated (accurate) summation unless the caller asks otherwise.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      ! local_only: skip the final reduction over the distributed grid.
      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! just_sum: return the bare sum without the volume factor.
      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      IF (loc_sumtype == do_standard_sum) THEN

         ! Plain intrinsic SUM; only the real part of pw2 contributes.
         integral_value = sum(pw1%array*real(pw2%array, kind=dp))

      ELSE

         ! Compensated summation for better accuracy on large arrays.
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp))

      END IF

      ! Scale by the cell volume (g-space convention, cf. dvol in the rs variant).
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! Half-space grids store one member of each +/-G pair: double the sum and
      ! take out the double-counted G=0 term when this rank holds it.
      ! NOTE(review): the G=0 correction is applied without the vol factor
      ! even when just_sum is .false. — confirm this path is only exercised
      ! where that is intended.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*real(pw2%array(1), kind=dp)
      END IF

      ! Accumulate the partial sums of all ranks of a distributed grid.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_c1d_gs
3730
3731! **************************************************************************************************
3732!> \brief ...
3733!> \param pw1 ...
3734!> \param pw2 ...
3735!> \return ...
3736! **************************************************************************************************
3737 FUNCTION pw_integral_a2b_r1d_c1d (pw1, pw2) RESULT(integral_value)
3738
3739 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
3740 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
3741 REAL(kind=dp) :: integral_value
3742
3743 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'
3744
3745 INTEGER :: handle
3746
3747 CALL timeset(routinen, handle)
3748
3749 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
3750 cpabort("Grids incompatible")
3751 END IF
3752
3753 integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)*pw1%pw_grid%gsq)
3754 IF (pw1%pw_grid%grid_span == halfspace) THEN
3755 integral_value = 2.0_dp*integral_value
3756 END IF
3757
3758 integral_value = integral_value*pw1%pw_grid%vol
3759
3760 IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
3761 CALL pw1%pw_grid%para%group%sum(integral_value)
3762 CALL timestop(handle)
3763
3764 END FUNCTION pw_integral_a2b_r1d_c1d
3765! **************************************************************************************************
3766!> \brief copy a pw type variable
3767!> \param pw1 ...
3768!> \param pw2 ...
3769!> \par History
3770!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
3771!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
3772!> JGH (21-Feb-2003) : Code for generalized reference grids
3773!> \author apsi
3774!> \note
3775!> Currently only copying of respective types allowed,
3776!> in order to avoid errors
3777! **************************************************************************************************
3778 SUBROUTINE pw_copy_r3d_r3d_rs (pw1, pw2)
3779
3780 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
3781 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
3782
3783 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
3784
3785 INTEGER :: handle
3786
3787 CALL timeset(routinen, handle)
3788
3789 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
3790 cpabort("Both grids must be either spherical or non-spherical!")
3791 IF (pw1%pw_grid%spherical) &
3792 cpabort("Spherical grids only exist in reciprocal space!")
3793
3794 IF (any(shape(pw2%array) /= shape(pw1%array))) &
3795 cpabort("3D grids must be compatible!")
3796 IF (pw1%pw_grid%spherical) &
3797 cpabort("3D grids must not be spherical!")
3798!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3799 pw2%array(:, :, :) = pw1%array(:, :, :)
3800!$OMP END PARALLEL WORKSHARE
3801
3802 CALL timestop(handle)
3803
3804 END SUBROUTINE pw_copy_r3d_r3d_rs
3805
3806! **************************************************************************************************
3807!> \brief ...
3808!> \param pw ...
3809!> \param array ...
3810! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r3d_r3d_rs (pw, array)
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT)   :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      ! Whole-array copy of the 3D real-space data into the caller's buffer;
      ! array assignment requires the two shapes to conform.
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array(:, :, :) = pw%array(:, :, :)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r3d_r3d_rs
3827
3828! **************************************************************************************************
3829!> \brief ...
3830!> \param pw ...
3831!> \param array ...
3832! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r3d_r3d_rs (pw, array)
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN)      :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      ! Whole-array overwrite of the 3D real-space data from the caller's
      ! buffer; array assignment requires the two shapes to conform.
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array = array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r3d_r3d_rs
3849
! **************************************************************************************************
!> \brief In-place linear combination: pw2 <- alpha*pw1 + beta*pw2.
!>        alpha and beta both default to 1.
!> \param pw1 field scaled by alpha and accumulated into pw2
!> \param pw2 accumulator field, updated in place
!> \param alpha scaling factor for pw1 (default 1)
!> \param beta scaling factor for the current content of pw2 (default 1)
!> \param allow_noncompatible_grids also accept grids that merely agree in shape
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_r3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT)                :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL                :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL                      :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER                                            :: timer
      LOGICAL                                            :: tolerate_mismatch
      REAL(KIND=dp)                                      :: a_fac, b_fac

      CALL timeset(routinen, timer)

      ! resolve the optional arguments to their defaults
      a_fac = 1.0_dp
      b_fac = 1.0_dp
      tolerate_mismatch = .FALSE.
      IF (PRESENT(alpha)) a_fac = alpha
      IF (PRESENT(beta)) b_fac = beta
      IF (PRESENT(allow_noncompatible_grids)) tolerate_mismatch = allow_noncompatible_grids

      ! first bring the accumulator to beta*pw2 (a zero beta wipes it outright)
      IF (b_fac /= 1.0_dp) THEN
         IF (b_fac == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,b_fac)
            pw2%array = pw2%array*b_fac
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! same grid object: accumulate directly; alpha == 0 is a no-op here
         IF (a_fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (a_fac /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, a_fac)
            pw2%array = pw2%array + a_fac*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. tolerate_mismatch) THEN

         ! different grid objects: shapes must still agree exactly
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (a_fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,a_fac)
            pw2%array = pw2%array + a_fac*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(timer)

   END SUBROUTINE pw_axpy_r3d_r3d_rs
3935
! **************************************************************************************************
!> \brief Accumulate a scaled pointwise product: pw_out <- pw_out + alpha*pw1*pw2.
!>        alpha defaults to 1.
!> \param pw_out accumulator field, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha scaling of the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_r3d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_rs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER                                            :: timer
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routinen, timer)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! older Intel LLVM compilers miscompile WORKSHARE here, hence the serial fallback
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*pw2%array
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
      END IF
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_r3d_r3d_rs
3986
! **************************************************************************************************
!> \brief Pointwise in-place product: pw1 <- pw1 * pw2.
!> \param pw1 field updated in place
!> \param pw2 multiplier field, must share the grid object with pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_r3d_rs (pw1, pw2)
      TYPE(pw_r3d_rs_type), INTENT(INOUT)                :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! only identical grid objects are supported
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_r3d_r3d_rs
4012
! **************************************************************************************************
!> \brief Integral of the product of two real-space fields over the unit cell;
!>        only the real part is returned.
!> \param pw1 first field
!> \param pw2 second field, must share the grid object with pw1
!> \param sumtype selects do_standard_sum or (default) do_accurate_sum
!> \param just_sum if true, skip the volume-element scaling and return the raw sum
!> \param local_only if true, skip the parallel reduction over the grid's group
!> \return the (locally or globally reduced) integral value
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_r3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r3d_r3d_rs'

      INTEGER                                            :: timer, sum_method
      LOGICAL                                            :: only_local, skip_volume

      CALL timeset(routinen, timer)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      only_local = .FALSE.
      IF (PRESENT(local_only)) only_local = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      skip_volume = .FALSE.
      IF (PRESENT(just_sum)) skip_volume = just_sum

      IF (sum_method == do_standard_sum) THEN
         ! plain intrinsic reduction
         integral_value = sum(pw1%array*pw2%array)
      ELSE
         ! compensated summation for better accuracy
         integral_value = accurate_dot_product(pw1%array, pw2%array)
      END IF

      ! convert the raw sum into an integral via the volume element
      IF (.NOT. skip_volume) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. only_local .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(timer)

   END FUNCTION pw_integral_ab_r3d_r3d_rs
! **************************************************************************************************
!> \brief Copy a g-space pw field onto another of the same type.
!> \param pw1 source field
!> \param pw2 destination field; must match pw1 in shape and sphericity
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r3d_r3d_gs (pw1, pw2)

      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! guard clauses: sphericity must agree, shapes must match, and 3D data
      ! cannot live on a spherical grid
      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (any(shape(pw2%array) /= shape(pw1%array))) &
         cpabort("3D grids must be compatible!")
      IF (pw1%pw_grid%spherical) &
         cpabort("3D grids must not be spherical!")
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_copy_r3d_r3d_gs
4117
! **************************************************************************************************
!> \brief Dump the data of a g-space pw field into a plain 3D buffer.
!> \param pw source field; its data array must conform with the buffer
!> \param array destination buffer, overwritten element-wise
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r3d_r3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT)   :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! threaded element-wise copy of the grid data
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array = pw%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_r3d_r3d_gs
4139
! **************************************************************************************************
!> \brief Load the data of a g-space pw field from a plain 3D buffer.
!> \param pw destination field (its data array is overwritten)
!> \param array source buffer; must conform with the field's data array
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r3d_r3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN)      :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! threaded element-wise copy into the grid data
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array(:, :, :) = array(:, :, :)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_r3d_r3d_gs
4161
! **************************************************************************************************
!> \brief In-place linear combination: pw2 <- alpha*pw1 + beta*pw2.
!>        alpha and beta both default to 1.
!> \param pw1 field scaled by alpha and accumulated into pw2
!> \param pw2 accumulator field, updated in place
!> \param alpha scaling factor for pw1 (default 1)
!> \param beta scaling factor for the current content of pw2 (default 1)
!> \param allow_noncompatible_grids also accept grids that merely agree in shape
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_r3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL                :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL                      :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER                                            :: timer
      LOGICAL                                            :: tolerate_mismatch
      REAL(KIND=dp)                                      :: a_fac, b_fac

      CALL timeset(routinen, timer)

      ! resolve the optional arguments to their defaults
      a_fac = 1.0_dp
      b_fac = 1.0_dp
      tolerate_mismatch = .FALSE.
      IF (PRESENT(alpha)) a_fac = alpha
      IF (PRESENT(beta)) b_fac = beta
      IF (PRESENT(allow_noncompatible_grids)) tolerate_mismatch = allow_noncompatible_grids

      ! first bring the accumulator to beta*pw2 (a zero beta wipes it outright)
      IF (b_fac /= 1.0_dp) THEN
         IF (b_fac == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,b_fac)
            pw2%array = pw2%array*b_fac
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! same grid object: accumulate directly; alpha == 0 is a no-op here
         IF (a_fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (a_fac /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, a_fac)
            pw2%array = pw2%array + a_fac*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. tolerate_mismatch) THEN

         ! different grid objects: shapes must still agree exactly
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (a_fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,a_fac)
            pw2%array = pw2%array + a_fac*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(timer)

   END SUBROUTINE pw_axpy_r3d_r3d_gs
4247
! **************************************************************************************************
!> \brief Accumulate a scaled pointwise product: pw_out <- pw_out + alpha*pw1*pw2.
!>        alpha defaults to 1.
!> \param pw_out accumulator field, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha scaling of the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_r3d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER                                            :: timer
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routinen, timer)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! older Intel LLVM compilers miscompile WORKSHARE here, hence the serial fallback
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*pw2%array
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
      END IF
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_r3d_r3d_gs
4298
! **************************************************************************************************
!> \brief Pointwise in-place product: pw1 <- pw1 * pw2.
!> \param pw1 field updated in place
!> \param pw2 multiplier field, must share the grid object with pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_r3d_gs (pw1, pw2)
      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! only identical grid objects are supported
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_r3d_r3d_gs
4324
! **************************************************************************************************
!> \brief Integral of the product of two g-space fields over the unit cell;
!>        only the real part is returned.
!> \param pw1 first field
!> \param pw2 second field, must share the grid object with pw1
!> \param sumtype selects do_standard_sum or (default) do_accurate_sum
!> \param just_sum if true, skip the volume scaling and return the raw sum
!> \param local_only if true, skip the parallel reduction over the grid's group
!> \return the (locally or globally reduced) integral value
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_r3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r3d_r3d_gs'

      INTEGER                                            :: timer, sum_method
      LOGICAL                                            :: only_local, skip_volume

      CALL timeset(routinen, timer)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      only_local = .FALSE.
      IF (PRESENT(local_only)) only_local = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      skip_volume = .FALSE.
      IF (PRESENT(just_sum)) skip_volume = just_sum

      IF (sum_method == do_standard_sum) THEN
         ! plain intrinsic reduction
         integral_value = sum(pw1%array*pw2%array)
      ELSE
         ! compensated summation for better accuracy
         integral_value = accurate_dot_product(pw1%array, pw2%array)
      END IF

      ! g-space sums are scaled by the full cell volume (contrast dvol in real space)
      IF (.NOT. skip_volume) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. only_local .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(timer)

   END FUNCTION pw_integral_ab_r3d_r3d_gs
4391
! **************************************************************************************************
!> \brief Copy a real-valued real-space pw field into a complex-valued one;
!>        imaginary parts of the destination are set to zero.
!> \param pw1 source (real) field
!> \param pw2 destination (complex) field; must match pw1 in shape and sphericity
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r3d_c3d_rs (pw1, pw2)

      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT)                :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      ! this abort makes any later spherical check unreachable, so the former
      ! duplicate "3D grids must not be spherical!" test below has been removed
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (any(shape(pw2%array) /= shape(pw1%array))) &
         cpabort("3D grids must be compatible!")
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array(:, :, :) = cmplx(pw1%array(:, :, :), 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r3d_c3d_rs
4432
! **************************************************************************************************
!> \brief Dump a real-valued pw field into a complex 3D buffer (zero imaginary parts).
!> \param pw source field; its data array must conform with the buffer
!> \param array destination complex buffer, overwritten element-wise
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r3d_c3d_rs (pw, array)
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! promote the real data to complex with a vanishing imaginary part
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array = cmplx(pw%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_r3d_c3d_rs
4454
! **************************************************************************************************
!> \brief Load a real-valued pw field from a complex 3D buffer; only the real
!>        parts of the buffer are kept.
!> \param pw destination field (its data array is overwritten)
!> \param array source complex buffer; must conform with the field's data array
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r3d_c3d_rs (pw, array)
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN)   :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! keep the real component only; imaginary parts are discarded
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array(:, :, :) = real(array(:, :, :), kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_r3d_c3d_rs
4476
! **************************************************************************************************
!> \brief In-place linear combination of a real field into a complex one:
!>        pw2 <- alpha*pw1 + beta*pw2. alpha and beta both default to 1.
!> \param pw1 real field scaled by alpha and accumulated into pw2
!> \param pw2 complex accumulator field, updated in place
!> \param alpha scaling factor for pw1 (default 1)
!> \param beta scaling factor for the current content of pw2 (default 1)
!> \param allow_noncompatible_grids also accept grids that merely agree in shape
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_c3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT)                :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL                :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL                      :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER                                            :: timer
      LOGICAL                                            :: tolerate_mismatch
      REAL(KIND=dp)                                      :: a_fac, b_fac

      CALL timeset(routinen, timer)

      ! resolve the optional arguments to their defaults
      a_fac = 1.0_dp
      b_fac = 1.0_dp
      tolerate_mismatch = .FALSE.
      IF (PRESENT(alpha)) a_fac = alpha
      IF (PRESENT(beta)) b_fac = beta
      IF (PRESENT(allow_noncompatible_grids)) tolerate_mismatch = allow_noncompatible_grids

      ! first bring the accumulator to beta*pw2 (a zero beta wipes it outright)
      IF (b_fac /= 1.0_dp) THEN
         IF (b_fac == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,b_fac)
            pw2%array = pw2%array*b_fac
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! same grid object: accumulate the real data promoted to complex;
         ! alpha == 0 is a no-op here
         IF (a_fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (a_fac /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, a_fac)
            pw2%array = pw2%array + a_fac*cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. tolerate_mismatch) THEN

         ! different grid objects: shapes must still agree exactly
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (a_fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,a_fac)
            pw2%array = pw2%array + a_fac*cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(timer)

   END SUBROUTINE pw_axpy_r3d_c3d_rs
4562
! **************************************************************************************************
!> \brief Accumulate a scaled pointwise product into a real field:
!>        pw_out <- pw_out + alpha*pw1*Re(pw2). alpha defaults to 1.
!> \param pw_out real accumulator field, updated in place
!> \param pw1 real first factor
!> \param pw2 complex second factor; only its real part enters the product
!> \param alpha scaling of the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_c3d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_rs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER                                            :: timer
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routinen, timer)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! older Intel LLVM compilers miscompile WORKSHARE here, hence the serial fallback
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*real(pw2%array, kind=dp)
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*real(pw2%array, kind=dp)
      END IF
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_r3d_c3d_rs
4613
! **************************************************************************************************
!> \brief Pointwise in-place product with the real part of a complex field:
!>        pw1 <- pw1 * Re(pw2).
!> \param pw1 real field updated in place
!> \param pw2 complex multiplier field, must share the grid object with pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_c3d_rs (pw1, pw2)
      TYPE(pw_r3d_rs_type), INTENT(INOUT)                :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN)                   :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! only identical grid objects are supported
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_r3d_c3d_rs
4639
! **************************************************************************************************
!> \brief Integral over the unit cell of the product of a real field with the
!>        real part of a complex field; only the real part is returned.
!> \param pw1 real field
!> \param pw2 complex field; only Re(pw2) enters the integral
!> \param sumtype selects do_standard_sum or (default) do_accurate_sum
!> \param just_sum if true, skip the volume-element scaling and return the raw sum
!> \param local_only if true, skip the parallel reduction over the grid's group
!> \return the (locally or globally reduced) integral value
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_c3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r3d_c3d_rs'

      INTEGER                                            :: timer, sum_method
      LOGICAL                                            :: only_local, skip_volume

      CALL timeset(routinen, timer)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      only_local = .FALSE.
      IF (PRESENT(local_only)) only_local = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      skip_volume = .FALSE.
      IF (PRESENT(just_sum)) skip_volume = just_sum

      ! the imaginary part of pw2 is dropped in both summation paths
      IF (sum_method == do_standard_sum) THEN
         integral_value = sum(pw1%array*real(pw2%array, kind=dp))
      ELSE
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp))
      END IF

      ! convert the raw sum into an integral via the volume element
      IF (.NOT. skip_volume) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. only_local .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(timer)

   END FUNCTION pw_integral_ab_r3d_c3d_rs
! **************************************************************************************************
!> \brief Copy a real-valued g-space pw field into a complex-valued one;
!>        imaginary parts of the destination are set to zero.
!> \param pw1 source (real) field
!> \param pw2 destination (complex) field; must match pw1 in shape and sphericity
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r3d_c3d_gs (pw1, pw2)

      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT)                :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! guard clauses: sphericity must agree, shapes must match, and 3D data
      ! cannot live on a spherical grid
      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (any(shape(pw2%array) /= shape(pw1%array))) &
         cpabort("3D grids must be compatible!")
      IF (pw1%pw_grid%spherical) &
         cpabort("3D grids must not be spherical!")
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array(:, :, :) = cmplx(pw1%array(:, :, :), 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_copy_r3d_c3d_gs
4744
! **************************************************************************************************
!> \brief Dump a real-valued g-space pw field into a complex 3D buffer
!>        (zero imaginary parts).
!> \param pw source field; its data array must conform with the buffer
!> \param array destination complex buffer, overwritten element-wise
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r3d_c3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! promote the real data to complex with a vanishing imaginary part
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array = cmplx(pw%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_r3d_c3d_gs
4766
! **************************************************************************************************
!> \brief Load a real-valued g-space pw field from a complex 3D buffer; only
!>        the real parts of the buffer are kept.
!> \param pw destination field (its data array is overwritten)
!> \param array source complex buffer; must conform with the field's data array
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r3d_c3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN)   :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: timer

      CALL timeset(routinen, timer)

      ! keep the real component only; imaginary parts are discarded
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array(:, :, :) = real(array(:, :, :), kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_r3d_c3d_gs
4788
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 real-valued 3D input grid (promoted to complex on accumulation)
!> \param pw2 complex-valued 3D accumulator, updated in place
!> \param alpha optional scale factor for pw1 (default 1.0)
!> \param beta optional scale factor applied to pw2 before accumulation (default 1.0)
!> \param allow_noncompatible_grids if true, accept distinct grids of identical array shape
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_c3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Pre-scale the accumulator; beta == 0 zeroes it outright so no stale
      ! data survives, any other beta /= 1 scales it in place.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      ! Fast path: both descriptors point to the very same grid object.
      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! alpha == 0 contributes nothing, so that case is skipped entirely.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Distinct grid objects: 3D data has no index mapping, so the array
         ! shapes must agree exactly.
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r3d_c3d_gs
4874
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1; only the real part of pw2 enters the product
!> \param pw_out real-valued accumulator grid, updated in place
!> \param pw1 real-valued input grid
!> \param pw2 complex-valued input grid (imaginary part ignored)
!> \param alpha optional scale factor (default 1.0)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_c3d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! All three quantities must live on the very same grid object.
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! The serial fallback below is used for Intel LLVM compilers older than
      ! 2025.0, which the guard excludes from the OMP WORKSHARE path
      ! (presumably to work around a compiler defect -- TODO confirm).
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r3d_c3d_gs
4925
! **************************************************************************************************
!> \brief In-place product pw1 = pw1 * Re(pw2); the imaginary part of pw2
!>        is discarded.
!> \param pw1 real-valued grid, scaled in place
!> \param pw2 complex-valued multiplier on the same grid
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_c3d_gs (pw1, pw2)
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      ! Element-wise product is only defined on the identical grid object.
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r3d_c3d_gs
4951
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis;
!>        only the real part of pw2 enters, so the result is real.
!> \param pw1 real-valued 3D grid
!> \param pw2 complex-valued 3D grid on the same grid object
!> \param sumtype do_standard_sum or do_accurate_sum (default: accurate)
!> \param just_sum if true, skip the volume factor and return the raw sum
!> \param local_only if true, skip the parallel reduction over processes
!> \return the (real) integral value
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_c3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r3d_c3d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      IF (loc_sumtype == do_standard_sum) THEN

         ! Plain intrinsic SUM; cheapest, but subject to rounding accumulation.
         ! Only the real part of pw2 contributes.
         integral_value = sum(pw1%array*real(pw2%array, kind=dp))

      ELSE

         ! Compensated summation for improved numerical accuracy.
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp))

      END IF

      ! NOTE(review): this variant scales by %vol whereas the c1d variant
      ! below scales by %dvol -- confirm this is intended for 3D data in
      ! reciprocal space.
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! Reduce over all processes when the grid is distributed.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r3d_c3d_gs
5018
! **************************************************************************************************
!> \brief Copy a complex 1D coefficient field into a real one (real part only),
!>        supporting copies between a grid and its reference grid via the
!>        gidx index mapping.
!> \param pw1 source (complex data)
!> \param pw2 destination (real data), overwritten
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c1d_r1d_rs (pw1, pw2)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! NOTE(review): this branch is unreachable here -- the abort above has
         ! already excluded spherical grids. It is kept for symmetry with the
         ! template-generated reciprocal-space variants.
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = real(pw1%array(1:ng), kind=dp)
!$OMP END PARALLEL WORKSHARE
               ! Zero-pad the destination when it is larger than the source.
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! NOTE(review): ns is computed but never used in this routine.
            ns = 2*max(ng1, ng2)

            ! pw1 lives on the reference grid of pw2: map through pw2's gidx.
            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               IF (ng1 >= ng2) THEN
                  ! Gather: every destination element has a source counterpart.
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(i,j) &
!$OMP SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! Scatter into a larger destination: zero first, then place.
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(i,j) &
!$OMP SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ! pw2 lives on the reference grid of pw1: map through pw1's gidx.
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! Same grid object: a direct element-wise copy of the real part.
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c1d_r1d_rs
5124
! **************************************************************************************************
!> \brief Export the data of a 1D complex field into a bare real array; the
!>        imaginary part of the source is discarded.
!> \param pw source grid
!> \param array destination buffer, conforming with the grid data
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c1d_r1d_rs (pw, array)
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! Element-wise copy of the real component.
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(array)
         array(ig) = REAL(pw%array(ig), KIND=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_c1d_r1d_rs
5146
! **************************************************************************************************
!> \brief Import data from a bare real array into a 1D complex field; the
!>        imaginary parts are initialized to zero.
!> \param pw destination grid (its data array is overwritten)
!> \param array source buffer, conforming with the grid data
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c1d_r1d_rs (pw, array)
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! Element-wise promotion to complex with zero imaginary part.
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = CMPLX(array(ig), 0.0_dp, KIND=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_c1d_r1d_rs
5168
! **************************************************************************************************
!> \brief pw2 = alpha*Re(pw1) + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 complex-valued input (only its real part is accumulated)
!> \param pw2 real-valued accumulator, updated in place
!> \param alpha optional scale factor for pw1 (default 1.0)
!> \param beta optional scale factor applied to pw2 before accumulation (default 1.0)
!> \param allow_noncompatible_grids if true, also accept grids that are neither
!>        identical nor pw_compatible, using the reference-grid index mapping
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_r1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Pre-scale the accumulator; beta == 0 zeroes it outright.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      ! Fast path: both descriptors point to the very same grid object.
      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! alpha == 0 contributes nothing and is skipped entirely.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         ! Spherical grids: plain prefix overlap, no index mapping needed.
         IF (pw1%pw_grid%spherical) THEN
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ! pw1 lives on the reference grid of pw2: map through pw2's gidx.
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            IF (ng1 >= ng2) THEN
               ! Gather: each destination element picks its source element.
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! Scatter: each source element is added at its mapped position.
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ! pw2 lives on the reference grid of pw1: map through pw1's gidx.
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_r1d_rs
5333
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1; pw2 is promoted to complex with zero imaginary part
!> \param pw_out complex-valued accumulator grid, updated in place
!> \param pw1 complex-valued input grid
!> \param pw2 real-valued input grid
!> \param alpha optional scale factor (default 1.0)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_r1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! All three quantities must live on the very same grid object.
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! The serial fallback below is used for Intel LLVM compilers older than
      ! 2025.0, which the guard excludes from the OMP WORKSHARE path
      ! (presumably to work around a compiler defect -- TODO confirm).
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_r1d_rs
5384
! **************************************************************************************************
!> \brief In-place product pw1 = pw1 * pw2, with the real field pw2 promoted
!>        to complex (zero imaginary part).
!> \param pw1 complex-valued grid, scaled in place
!> \param pw2 real-valued multiplier on the same grid
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_c1d_r1d_rs (pw1, pw2)
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! Element-wise products are only defined on the identical grid object.
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw1, pw2)
      DO ig = 1, SIZE(pw1%array)
         pw1%array(ig) = pw1%array(ig)*CMPLX(pw2%array(ig), 0.0_dp, KIND=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_c1d_r1d_rs
5410
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis;
!>        only the real part of pw1 enters, so the result is real.
!> \param pw1 complex-valued 1D coefficient field
!> \param pw2 real-valued 1D coefficient field on the same grid object
!> \param sumtype do_standard_sum or do_accurate_sum (default: accurate)
!> \param just_sum if true, skip the volume-element factor and return the raw sum
!> \param local_only if true, skip the parallel reduction over processes
!> \return the (real) integral value
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_r1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_r1d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      IF (loc_sumtype == do_standard_sum) THEN

         ! Plain intrinsic SUM; cheapest, but subject to rounding accumulation.
         ! Only the real part of pw1 contributes.
         integral_value = sum(real(pw1%array, kind=dp)*pw2%array)

      ELSE

         ! Compensated summation for improved numerical accuracy.
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array)

      END IF

      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! A halfspace grid stores only half of the +/-G pairs: double the sum
      ! and remove the once double-counted G=0 term if this rank holds it.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! Reduce over all processes when the grid is distributed.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_r1d_rs
! **************************************************************************************************
!> \brief Copy a complex 1D reciprocal-space coefficient field into a real one
!>        (real part only), supporting spherical-grid prefix copies and copies
!>        between a grid and its reference grid via the gidx index mapping.
!> \param pw1 source (complex data)
!> \param pw2 destination (real data), overwritten
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c1d_r1d_gs (pw1, pw2)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! Spherical grids: ordered G-vector lists allow a plain prefix copy.
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = real(pw1%array(1:ng), kind=dp)
!$OMP END PARALLEL WORKSHARE
               ! Zero-pad the destination when it is larger than the source.
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! NOTE(review): ns is computed but never used in this routine.
            ns = 2*max(ng1, ng2)

            ! pw1 lives on the reference grid of pw2: map through pw2's gidx.
            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               IF (ng1 >= ng2) THEN
                  ! Gather: every destination element has a source counterpart.
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(i,j) &
!$OMP SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! Scatter into a larger destination: zero first, then place.
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(i,j) &
!$OMP SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ! pw2 lives on the reference grid of pw1: map through pw1's gidx.
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! Same grid object: a direct element-wise copy of the real part.
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c1d_r1d_gs
5585
! **************************************************************************************************
!> \brief Export the data of a 1D complex reciprocal-space field into a bare
!>        real array; the imaginary part of the source is discarded.
!> \param pw source grid
!> \param array destination buffer, conforming with the grid data
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c1d_r1d_gs (pw, array)
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! Element-wise copy of the real component.
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(array)
         array(ig) = REAL(pw%array(ig), KIND=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_c1d_r1d_gs
5607
! **************************************************************************************************
!> \brief Import data from a bare real array into a 1D complex
!>        reciprocal-space field; imaginary parts are initialized to zero.
!> \param pw destination grid (its data array is overwritten)
!> \param array source buffer, conforming with the grid data
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c1d_r1d_gs (pw, array)
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle, ig

      CALL timeset(routineN, handle)

      ! Element-wise promotion to complex with zero imaginary part.
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = CMPLX(array(ig), 0.0_dp, KIND=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_c1d_r1d_gs
5629
5630! **************************************************************************************************
5631!> \brief pw2 = alpha*pw1 + beta*pw2
5632!> alpha defaults to 1, beta defaults to 1
5633!> \param pw1 ...
5634!> \param pw2 ...
5635!> \param alpha ...
5636!> \param beta ...
5637!> \param allow_noncompatible_grids ...
5638!> \par History
5639!> JGH (21-Feb-2003) : added reference grid functionality
5640!> JGH (01-Dec-2007) : rename and remove complex alpha
5641!> \author apsi
5642!> \note
5643!> Currently only summing up of respective types allowed,
5644!> in order to avoid errors
5645! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_r1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! pw2 = alpha*REAL(pw1) + beta*pw2 on reciprocal-space (gs) grids:
      ! only the real part of the complex coefficients of pw1 is accumulated
      ! into the real data of pw2; alpha and beta both default to 1.0_dp.

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! resolve the optional arguments to their documented defaults
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! scale pw2 by beta first; beta == 0 is a plain zeroing, beta == 1 a no-op
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! same grid object: whole-array accumulation
         ! (my_alpha == 0 leaves pw2 unchanged beyond the beta scaling)
         ! NOTE(review): PRIVATE(i) is redundant here, i is not referenced
         ! inside the workshare
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)   ! number of coefficients common to both grids

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff: both arrays share the ordering of the first
            ! ng coefficients, so they can be combined element by element
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw2's grid has pw1's grid as its reference grid: translate
            ! indices through pw2's gidx map; the transfer direction
            ! (gather vs scatter) depends on which array is larger
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! mirror case: pw1's grid references pw2's grid, so use pw1's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! no reference relation between the two grids: cannot map indices
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_r1d_gs
5794
5795! **************************************************************************************************
5796!> \brief pw_out = pw_out + alpha * pw1 * pw2
5797!> alpha defaults to 1
5798!> \param pw_out ...
5799!> \param pw1 ...
5800!> \param pw2 ...
5801!> \param alpha ...
5802!> \author JGH
5803! **************************************************************************************************
5804 SUBROUTINE pw_multiply_c1d_r1d_gs (pw_out, pw1, pw2, alpha)
5805
5806 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw_out
5807 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
5808 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
5809 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
5810
5811 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
5812
5813 INTEGER :: handle
5814 REAL(KIND=dp) :: my_alpha
5815
5816 CALL timeset(routinen, handle)
5817
5818 my_alpha = 1.0_dp
5819 IF (PRESENT(alpha)) my_alpha = alpha
5820
5821 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
5822 cpabort("pw_multiply not implemented for non-identical grids!")
5823
5824#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
5825 IF (my_alpha == 1.0_dp) THEN
5826!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
5827 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
5828!$OMP END PARALLEL WORKSHARE
5829 ELSE
5830!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
5831 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
5832!$OMP END PARALLEL WORKSHARE
5833 END IF
5834#else
5835 IF (my_alpha == 1.0_dp) THEN
5836 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
5837 ELSE
5838 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
5839 END IF
5840#endif
5841
5842 CALL timestop(handle)
5843
5844 END SUBROUTINE pw_multiply_c1d_r1d_gs
5845
5846! **************************************************************************************************
5847!> \brief ...
5848!> \param pw1 ...
5849!> \param pw2 ...
5850! **************************************************************************************************
5851 SUBROUTINE pw_multiply_with_c1d_r1d_gs (pw1, pw2)
5852 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw1
5853 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
5854
5855 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
5856
5857 INTEGER :: handle
5858
5859 CALL timeset(routinen, handle)
5860
5861 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
5862 cpabort("Incompatible grids!")
5863
5864!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
5865 pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
5866!$OMP END PARALLEL WORKSHARE
5867
5868 CALL timestop(handle)
5869
5870 END SUBROUTINE pw_multiply_with_c1d_r1d_gs
5871
5872! **************************************************************************************************
5873!> \brief Calculate integral over unit cell for functions in plane wave basis
!> only the real part of it is returned
5875!> \param pw1 ...
5876!> \param pw2 ...
5877!> \param sumtype ...
5878!> \param just_sum ...
5879!> \param local_only ...
5880!> \return ...
5881!> \par History
5882!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
5883!> \author apsi
5884! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_r1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Integral of pw1*pw2 over the unit cell for g-space data; only the
      ! real part of pw1 contributes (pw2 is real). Unless just_sum is set,
      ! the coefficient sum is scaled by the cell volume. Half-space grids
      ! store one member of each +G/-G pair, hence the doubling below.

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_r1d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! accurate (compensated) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      ! local_only skips the final reduction over the parallel group
      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      IF (loc_sumtype == do_standard_sum) THEN

         ! plain intrinsic SUM; only the real part of pw1 enters
         integral_value = sum(real(pw1%array, kind=dp)*pw2%array)

      ELSE

         ! compensated summation for reduced rounding error
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array)

      END IF

      IF (.NOT. my_just_sum) THEN
         ! g-space normalization: scale by the cell volume
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      IF (pw1%pw_grid%grid_span == halfspace) THEN
         ! each stored coefficient represents a +G/-G pair ...
         integral_value = 2.0_dp*integral_value
         ! ... except g=0, which was doubled above and is corrected here
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
            REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! each rank holds only its local g-vectors: reduce over the group
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_r1d_gs
5943
5944! **************************************************************************************************
5945!> \brief ...
5946!> \param pw1 ...
5947!> \param pw2 ...
5948!> \return ...
5949! **************************************************************************************************
5950 FUNCTION pw_integral_a2b_c1d_r1d (pw1, pw2) RESULT(integral_value)
5951
5952 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
5953 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
5954 REAL(kind=dp) :: integral_value
5955
5956 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'
5957
5958 INTEGER :: handle
5959
5960 CALL timeset(routinen, handle)
5961
5962 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
5963 cpabort("Grids incompatible")
5964 END IF
5965
5966 integral_value = accurate_sum(real(conjg(pw1%array), kind=dp)*pw2%array*pw1%pw_grid%gsq)
5967 IF (pw1%pw_grid%grid_span == halfspace) THEN
5968 integral_value = 2.0_dp*integral_value
5969 END IF
5970
5971 integral_value = integral_value*pw1%pw_grid%vol
5972
5973 IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
5974 CALL pw1%pw_grid%para%group%sum(integral_value)
5975 CALL timestop(handle)
5976
5977 END FUNCTION pw_integral_a2b_c1d_r1d
5978! **************************************************************************************************
5979!> \brief copy a pw type variable
5980!> \param pw1 ...
5981!> \param pw2 ...
5982!> \par History
5983!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
5984!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
5985!> JGH (21-Feb-2003) : Code for generalized reference grids
5986!> \author apsi
5987!> \note
5988!> Currently only copying of respective types allowed,
5989!> in order to avoid errors
5990! **************************************************************************************************
5991 SUBROUTINE pw_copy_c1d_c1d_rs (pw1, pw2)
5992
5993 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
5994 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
5995
5996 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
5997
5998 INTEGER :: handle
5999 INTEGER :: i, j, ng, ng1, ng2, ns
6000
6001 CALL timeset(routinen, handle)
6002
6003 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
6004 cpabort("Both grids must be either spherical or non-spherical!")
6005 IF (pw1%pw_grid%spherical) &
6006 cpabort("Spherical grids only exist in reciprocal space!")
6007
6008 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
6009 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
6010 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
6011 ng1 = SIZE(pw1%array)
6012 ng2 = SIZE(pw2%array)
6013 ng = min(ng1, ng2)
6014!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
6015 pw2%array(1:ng) = pw1%array(1:ng)
6016!$OMP END PARALLEL WORKSHARE
6017 IF (ng2 > ng) THEN
6018!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
6019 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
6020!$OMP END PARALLEL WORKSHARE
6021 END IF
6022 ELSE
6023 cpabort("Copies between spherical grids require compatible grids!")
6024 END IF
6025 ELSE
6026 ng1 = SIZE(pw1%array)
6027 ng2 = SIZE(pw2%array)
6028 ns = 2*max(ng1, ng2)
6029
6030 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
6031 IF (ng1 >= ng2) THEN
6032!$OMP PARALLEL DO DEFAULT(NONE) &
6033!$OMP PRIVATE(i,j) &
6034!$OMP SHARED(ng2, pw1, pw2)
6035 DO i = 1, ng2
6036 j = pw2%pw_grid%gidx(i)
6037 pw2%array(i) = pw1%array(j)
6038 END DO
6039!$OMP END PARALLEL DO
6040 ELSE
6041 CALL pw_zero(pw2)
6042!$OMP PARALLEL DO DEFAULT(NONE) &
6043!$OMP PRIVATE(i,j) &
6044!$OMP SHARED(ng1, pw1, pw2)
6045 DO i = 1, ng1
6046 j = pw2%pw_grid%gidx(i)
6047 pw2%array(j) = pw1%array(i)
6048 END DO
6049!$OMP END PARALLEL DO
6050 END IF
6051 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
6052 IF (ng1 >= ng2) THEN
6053!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
6054 DO i = 1, ng2
6055 j = pw1%pw_grid%gidx(i)
6056 pw2%array(i) = pw1%array(j)
6057 END DO
6058!$OMP END PARALLEL DO
6059 ELSE
6060 CALL pw_zero(pw2)
6061!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
6062 DO i = 1, ng1
6063 j = pw1%pw_grid%gidx(i)
6064 pw2%array(j) = pw1%array(i)
6065 END DO
6066!$OMP END PARALLEL DO
6067 END IF
6068 ELSE
6069 cpabort("Copy not implemented!")
6070 END IF
6071
6072 END IF
6073
6074 ELSE
6075!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
6076 pw2%array = pw1%array
6077!$OMP END PARALLEL WORKSHARE
6078 END IF
6079
6080 CALL timestop(handle)
6081
6082 END SUBROUTINE pw_copy_c1d_c1d_rs
6083
6084! **************************************************************************************************
6085!> \brief ...
6086!> \param pw ...
6087!> \param array ...
6088! **************************************************************************************************
6089 SUBROUTINE pw_copy_to_array_c1d_c1d_rs (pw, array)
6090 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
6091 COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
6092
6093 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
6094
6095 INTEGER :: handle
6096
6097 CALL timeset(routinen, handle)
6098
6099!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6100 array(:) = pw%array(:)
6101!$OMP END PARALLEL WORKSHARE
6102
6103 CALL timestop(handle)
6104 END SUBROUTINE pw_copy_to_array_c1d_c1d_rs
6105
6106! **************************************************************************************************
6107!> \brief ...
6108!> \param pw ...
6109!> \param array ...
6110! **************************************************************************************************
6111 SUBROUTINE pw_copy_from_array_c1d_c1d_rs (pw, array)
6112 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
6113 COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array
6114
6115 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
6116
6117 INTEGER :: handle
6118
6119 CALL timeset(routinen, handle)
6120
6121!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6122 pw%array = array
6123!$OMP END PARALLEL WORKSHARE
6124
6125 CALL timestop(handle)
6126 END SUBROUTINE pw_copy_from_array_c1d_c1d_rs
6127
6128! **************************************************************************************************
6129!> \brief pw2 = alpha*pw1 + beta*pw2
6130!> alpha defaults to 1, beta defaults to 1
6131!> \param pw1 ...
6132!> \param pw2 ...
6133!> \param alpha ...
6134!> \param beta ...
6135!> \param allow_noncompatible_grids ...
6136!> \par History
6137!> JGH (21-Feb-2003) : added reference grid functionality
6138!> JGH (01-Dec-2007) : rename and remove complex alpha
6139!> \author apsi
6140!> \note
6141!> Currently only summing up of respective types allowed,
6142!> in order to avoid errors
6143! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_c1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! pw2 = alpha*pw1 + beta*pw2 for complex 1d real-space data;
      ! alpha and beta both default to 1.0_dp.

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! resolve the optional arguments to their documented defaults
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! scale pw2 by beta first; beta == 0 is a plain zeroing, beta == 1 a no-op
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! same grid object: whole-array accumulation
         ! (my_alpha == 0 leaves pw2 unchanged beyond the beta scaling)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)   ! number of coefficients common to both grids

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff: both arrays share the ordering of the first
            ! ng coefficients, so they can be combined element by element
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw2's grid has pw1's grid as its reference grid: translate
            ! indices through pw2's gidx map; the transfer direction
            ! (gather vs scatter) depends on which array is larger
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! mirror case: pw1's grid references pw2's grid, so use pw1's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! no reference relation between the two grids: cannot map indices
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_c1d_rs
6292
6293! **************************************************************************************************
6294!> \brief pw_out = pw_out + alpha * pw1 * pw2
6295!> alpha defaults to 1
6296!> \param pw_out ...
6297!> \param pw1 ...
6298!> \param pw2 ...
6299!> \param alpha ...
6300!> \author JGH
6301! **************************************************************************************************
6302 SUBROUTINE pw_multiply_c1d_c1d_rs (pw_out, pw1, pw2, alpha)
6303
6304 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw_out
6305 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
6306 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
6307 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
6308
6309 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
6310
6311 INTEGER :: handle
6312 REAL(KIND=dp) :: my_alpha
6313
6314 CALL timeset(routinen, handle)
6315
6316 my_alpha = 1.0_dp
6317 IF (PRESENT(alpha)) my_alpha = alpha
6318
6319 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
6320 cpabort("pw_multiply not implemented for non-identical grids!")
6321
6322#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
6323 IF (my_alpha == 1.0_dp) THEN
6324!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
6325 pw_out%array = pw_out%array + pw1%array* pw2%array
6326!$OMP END PARALLEL WORKSHARE
6327 ELSE
6328!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
6329 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
6330!$OMP END PARALLEL WORKSHARE
6331 END IF
6332#else
6333 IF (my_alpha == 1.0_dp) THEN
6334 pw_out%array = pw_out%array + pw1%array* pw2%array
6335 ELSE
6336 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
6337 END IF
6338#endif
6339
6340 CALL timestop(handle)
6341
6342 END SUBROUTINE pw_multiply_c1d_c1d_rs
6343
6344! **************************************************************************************************
6345!> \brief ...
6346!> \param pw1 ...
6347!> \param pw2 ...
6348! **************************************************************************************************
6349 SUBROUTINE pw_multiply_with_c1d_c1d_rs (pw1, pw2)
6350 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw1
6351 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
6352
6353 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
6354
6355 INTEGER :: handle
6356
6357 CALL timeset(routinen, handle)
6358
6359 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
6360 cpabort("Incompatible grids!")
6361
6362!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
6363 pw1%array = pw1%array* pw2%array
6364!$OMP END PARALLEL WORKSHARE
6365
6366 CALL timestop(handle)
6367
6368 END SUBROUTINE pw_multiply_with_c1d_c1d_rs
6369
6370! **************************************************************************************************
6371!> \brief Calculate integral over unit cell for functions in plane wave basis
!> only the real part of it is returned
6373!> \param pw1 ...
6374!> \param pw2 ...
6375!> \param sumtype ...
6376!> \param just_sum ...
6377!> \param local_only ...
6378!> \return ...
6379!> \par History
6380!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
6381!> \author apsi
6382! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_c1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Real part of SUM(CONJG(pw1)*pw2) for complex real-space data.
      ! Unless just_sum is set, the sum is scaled by the volume element
      ! dvol (real-space quadrature weight).

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_c1d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! accurate (compensated) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      ! local_only skips the final reduction over the parallel group
      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      IF (loc_sumtype == do_standard_sum) THEN

         ! plain intrinsic SUM of the real part of the conjugate product
         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp))

      ELSE

         ! compensated summation for reduced rounding error
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      IF (.NOT. my_just_sum) THEN
         ! real-space normalization: scale by the volume element dvol
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! half-space handling (only relevant for half-space grids): each
      ! stored coefficient represents a +G/-G pair, except the g=0 term
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
            REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! each rank holds only its local data: reduce over the group
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_c1d_rs
6442! **************************************************************************************************
6443!> \brief copy a pw type variable
6444!> \param pw1 ...
6445!> \param pw2 ...
6446!> \par History
6447!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
6448!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
6449!> JGH (21-Feb-2003) : Code for generalized reference grids
6450!> \author apsi
6451!> \note
6452!> Currently only copying of respective types allowed,
6453!> in order to avoid errors
6454! **************************************************************************************************
6455 SUBROUTINE pw_copy_c1d_c1d_gs (pw1, pw2)
6456
6457 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
6458 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
6459
6460 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
6461
6462 INTEGER :: handle
6463 INTEGER :: i, j, ng, ng1, ng2, ns
6464
6465 CALL timeset(routinen, handle)
6466
6467 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
6468 cpabort("Both grids must be either spherical or non-spherical!")
6469
6470 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
6471 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
6472 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
6473 ng1 = SIZE(pw1%array)
6474 ng2 = SIZE(pw2%array)
6475 ng = min(ng1, ng2)
6476!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
6477 pw2%array(1:ng) = pw1%array(1:ng)
6478!$OMP END PARALLEL WORKSHARE
6479 IF (ng2 > ng) THEN
6480!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
6481 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
6482!$OMP END PARALLEL WORKSHARE
6483 END IF
6484 ELSE
6485 cpabort("Copies between spherical grids require compatible grids!")
6486 END IF
6487 ELSE
6488 ng1 = SIZE(pw1%array)
6489 ng2 = SIZE(pw2%array)
6490 ns = 2*max(ng1, ng2)
6491
6492 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
6493 IF (ng1 >= ng2) THEN
6494!$OMP PARALLEL DO DEFAULT(NONE) &
6495!$OMP PRIVATE(i,j) &
6496!$OMP SHARED(ng2, pw1, pw2)
6497 DO i = 1, ng2
6498 j = pw2%pw_grid%gidx(i)
6499 pw2%array(i) = pw1%array(j)
6500 END DO
6501!$OMP END PARALLEL DO
6502 ELSE
6503 CALL pw_zero(pw2)
6504!$OMP PARALLEL DO DEFAULT(NONE) &
6505!$OMP PRIVATE(i,j) &
6506!$OMP SHARED(ng1, pw1, pw2)
6507 DO i = 1, ng1
6508 j = pw2%pw_grid%gidx(i)
6509 pw2%array(j) = pw1%array(i)
6510 END DO
6511!$OMP END PARALLEL DO
6512 END IF
6513 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
6514 IF (ng1 >= ng2) THEN
6515!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
6516 DO i = 1, ng2
6517 j = pw1%pw_grid%gidx(i)
6518 pw2%array(i) = pw1%array(j)
6519 END DO
6520!$OMP END PARALLEL DO
6521 ELSE
6522 CALL pw_zero(pw2)
6523!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
6524 DO i = 1, ng1
6525 j = pw1%pw_grid%gidx(i)
6526 pw2%array(j) = pw1%array(i)
6527 END DO
6528!$OMP END PARALLEL DO
6529 END IF
6530 ELSE
6531 CALL pw_copy_match(pw1, pw2)
6532 END IF
6533
6534 END IF
6535
6536 ELSE
6537!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
6538 pw2%array = pw1%array
6539!$OMP END PARALLEL WORKSHARE
6540 END IF
6541
6542 CALL timestop(handle)
6543
6544 END SUBROUTINE pw_copy_c1d_c1d_gs
6545
6546! **************************************************************************************************
6547!> \brief ...
6548!> \param pw ...
6549!> \param array ...
6550! **************************************************************************************************
6551 SUBROUTINE pw_copy_to_array_c1d_c1d_gs (pw, array)
6552 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
6553 COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
6554
6555 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
6556
6557 INTEGER :: handle
6558
6559 CALL timeset(routinen, handle)
6560
6561!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6562 array(:) = pw%array(:)
6563!$OMP END PARALLEL WORKSHARE
6564
6565 CALL timestop(handle)
6566 END SUBROUTINE pw_copy_to_array_c1d_c1d_gs
6567
6568! **************************************************************************************************
6569!> \brief ...
6570!> \param pw ...
6571!> \param array ...
6572! **************************************************************************************************
6573 SUBROUTINE pw_copy_from_array_c1d_c1d_gs (pw, array)
6574 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
6575 COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array
6576
6577 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
6578
6579 INTEGER :: handle
6580
6581 CALL timeset(routinen, handle)
6582
6583!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6584 pw%array = array
6585!$OMP END PARALLEL WORKSHARE
6586
6587 CALL timestop(handle)
6588 END SUBROUTINE pw_copy_from_array_c1d_c1d_gs
6589
6590! **************************************************************************************************
6591!> \brief pw2 = alpha*pw1 + beta*pw2
6592!> alpha defaults to 1, beta defaults to 1
6593!> \param pw1 ...
6594!> \param pw2 ...
6595!> \param alpha ...
6596!> \param beta ...
6597!> \param allow_noncompatible_grids ...
6598!> \par History
6599!> JGH (21-Feb-2003) : added reference grid functionality
6600!> JGH (01-Dec-2007) : rename and remove complex alpha
6601!> \author apsi
6602!> \note
6603!> Currently only summing up of respective types allowed,
6604!> in order to avoid errors
6605! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_c1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! Computes pw2 = alpha*pw1 + beta*pw2 on complex 1D reciprocal-space data.
      ! alpha and beta both default to 1. When pw1 and pw2 live on different grid
      ! objects, coefficients are matched either one-to-one (spherical grids) or
      ! through the reference-grid index table gidx.

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1             ! source coefficients (x in axpy)
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2          ! accumulated result (y in axpy)
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Pre-scale pw2 by beta; beta == 0 zeroes the data explicitly instead of
      ! multiplying (avoids propagating 0*Inf/0*NaN from uninitialized data).
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Identical grid object: plain element-wise accumulation over the whole
         ! coefficient array. alpha == 0 is a no-op and is skipped entirely.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different grid objects: only the common leading section of length
         ! ng = min(size(pw1), size(pw2)) participates.
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids order their G-vectors consistently, so the
            ! first ng coefficients correspond one-to-one; no index translation.
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: translate through pw2's
            ! gidx table. Gather from pw1 when it is the larger array, scatter
            ! into pw2 otherwise.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw2 lives on the reference grid of pw1, so the index
            ! translation goes through pw1's gidx table instead.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! Neither grid references the other: no valid coefficient mapping.
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_c1d_gs
6754
6755! **************************************************************************************************
6756!> \brief pw_out = pw_out + alpha * pw1 * pw2
6757!> alpha defaults to 1
6758!> \param pw_out ...
6759!> \param pw1 ...
6760!> \param pw2 ...
6761!> \param alpha ...
6762!> \author JGH
6763! **************************************************************************************************
6764 SUBROUTINE pw_multiply_c1d_c1d_gs (pw_out, pw1, pw2, alpha)
6765
6766 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw_out
6767 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
6768 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
6769 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
6770
6771 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
6772
6773 INTEGER :: handle
6774 REAL(KIND=dp) :: my_alpha
6775
6776 CALL timeset(routinen, handle)
6777
6778 my_alpha = 1.0_dp
6779 IF (PRESENT(alpha)) my_alpha = alpha
6780
6781 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
6782 cpabort("pw_multiply not implemented for non-identical grids!")
6783
6784#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
6785 IF (my_alpha == 1.0_dp) THEN
6786!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
6787 pw_out%array = pw_out%array + pw1%array* pw2%array
6788!$OMP END PARALLEL WORKSHARE
6789 ELSE
6790!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
6791 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
6792!$OMP END PARALLEL WORKSHARE
6793 END IF
6794#else
6795 IF (my_alpha == 1.0_dp) THEN
6796 pw_out%array = pw_out%array + pw1%array* pw2%array
6797 ELSE
6798 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
6799 END IF
6800#endif
6801
6802 CALL timestop(handle)
6803
6804 END SUBROUTINE pw_multiply_c1d_c1d_gs
6805
6806! **************************************************************************************************
6807!> \brief ...
6808!> \param pw1 ...
6809!> \param pw2 ...
6810! **************************************************************************************************
6811 SUBROUTINE pw_multiply_with_c1d_c1d_gs (pw1, pw2)
6812 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw1
6813 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
6814
6815 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
6816
6817 INTEGER :: handle
6818
6819 CALL timeset(routinen, handle)
6820
6821 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
6822 cpabort("Incompatible grids!")
6823
6824!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
6825 pw1%array = pw1%array* pw2%array
6826!$OMP END PARALLEL WORKSHARE
6827
6828 CALL timestop(handle)
6829
6830 END SUBROUTINE pw_multiply_with_c1d_c1d_gs
6831
6832! **************************************************************************************************
6833!> \brief Calculate integral over unit cell for functions in plane wave basis
6834!> only returns the real part of it ......
6835!> \param pw1 ...
6836!> \param pw2 ...
6837!> \param sumtype ...
6838!> \param just_sum ...
6839!> \param local_only ...
6840!> \return ...
6841!> \par History
6842!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
6843!> \author apsi
6844! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_c1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Integral of conjg(pw1)*pw2 over the unit cell for reciprocal-space data;
      ! only the real part is returned. Supports plain vs. compensated summation,
      ! optional skipping of the volume factor, and optional skipping of the
      ! parallel reduction.

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype       ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_c1d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum
         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate (compensated) sum to reduce round-off accumulation
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      ! Scale by the cell volume unless the caller wants the raw sum
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! Halfspace grids store only one member of each +/-G pair: double the sum,
      ! then subtract the G=0 contribution once since it has no mirror partner.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! Reduce over all ranks holding parts of a distributed grid
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_c1d_gs
6904
6905! **************************************************************************************************
6906!> \brief ...
6907!> \param pw1 ...
6908!> \param pw2 ...
6909!> \return ...
6910! **************************************************************************************************
6911 FUNCTION pw_integral_a2b_c1d_c1d (pw1, pw2) RESULT(integral_value)
6912
6913 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
6914 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
6915 REAL(kind=dp) :: integral_value
6916
6917 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'
6918
6919 INTEGER :: handle
6920
6921 CALL timeset(routinen, handle)
6922
6923 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
6924 cpabort("Grids incompatible")
6925 END IF
6926
6927 integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp)*pw1%pw_grid%gsq)
6928 IF (pw1%pw_grid%grid_span == halfspace) THEN
6929 integral_value = 2.0_dp*integral_value
6930 END IF
6931
6932 integral_value = integral_value*pw1%pw_grid%vol
6933
6934 IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
6935 CALL pw1%pw_grid%para%group%sum(integral_value)
6936 CALL timestop(handle)
6937
6938 END FUNCTION pw_integral_a2b_c1d_c1d
6939! **************************************************************************************************
6940!> \brief copy a pw type variable
6941!> \param pw1 ...
6942!> \param pw2 ...
6943!> \par History
6944!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
6945!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
6946!> JGH (21-Feb-2003) : Code for generalized reference grids
6947!> \author apsi
6948!> \note
6949!> Currently only copying of respective types allowed,
6950!> in order to avoid errors
6951! **************************************************************************************************
6952 SUBROUTINE pw_copy_c3d_r3d_rs (pw1, pw2)
6953
6954 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
6955 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
6956
6957 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
6958
6959 INTEGER :: handle
6960
6961 CALL timeset(routinen, handle)
6962
6963 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
6964 cpabort("Both grids must be either spherical or non-spherical!")
6965 IF (pw1%pw_grid%spherical) &
6966 cpabort("Spherical grids only exist in reciprocal space!")
6967
6968 IF (any(shape(pw2%array) /= shape(pw1%array))) &
6969 cpabort("3D grids must be compatible!")
6970 IF (pw1%pw_grid%spherical) &
6971 cpabort("3D grids must not be spherical!")
6972!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
6973 pw2%array(:, :, :) = real(pw1%array(:, :, :), kind=dp)
6974!$OMP END PARALLEL WORKSHARE
6975
6976 CALL timestop(handle)
6977
6978 END SUBROUTINE pw_copy_c3d_r3d_rs
6979
6980! **************************************************************************************************
6981!> \brief ...
6982!> \param pw ...
6983!> \param array ...
6984! **************************************************************************************************
6985 SUBROUTINE pw_copy_to_array_c3d_r3d_rs (pw, array)
6986 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
6987 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
6988
6989 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
6990
6991 INTEGER :: handle
6992
6993 CALL timeset(routinen, handle)
6994
6995!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6996 array(:, :, :) = real(pw%array(:, :, :), kind=dp)
6997!$OMP END PARALLEL WORKSHARE
6998
6999 CALL timestop(handle)
7000 END SUBROUTINE pw_copy_to_array_c3d_r3d_rs
7001
7002! **************************************************************************************************
7003!> \brief ...
7004!> \param pw ...
7005!> \param array ...
7006! **************************************************************************************************
7007 SUBROUTINE pw_copy_from_array_c3d_r3d_rs (pw, array)
7008 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
7009 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7010
7011 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7012
7013 INTEGER :: handle
7014
7015 CALL timeset(routinen, handle)
7016
7017!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7018 pw%array = cmplx(array, 0.0_dp, kind=dp)
7019!$OMP END PARALLEL WORKSHARE
7020
7021 CALL timestop(handle)
7022 END SUBROUTINE pw_copy_from_array_c3d_r3d_rs
7023
7024! **************************************************************************************************
7025!> \brief pw2 = alpha*pw1 + beta*pw2
7026!> alpha defaults to 1, beta defaults to 1
7027!> \param pw1 ...
7028!> \param pw2 ...
7029!> \param alpha ...
7030!> \param beta ...
7031!> \param allow_noncompatible_grids ...
7032!> \par History
7033!> JGH (21-Feb-2003) : added reference grid functionality
7034!> JGH (01-Dec-2007) : rename and remove complex alpha
7035!> \author apsi
7036!> \note
7037!> Currently only summing up of respective types allowed,
7038!> in order to avoid errors
7039! **************************************************************************************************
7040 SUBROUTINE pw_axpy_c3d_r3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
7041
7042 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7043 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
7044 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
7045 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
7046
7047 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
7048
7049 INTEGER :: handle
7050 LOGICAL :: my_allow_noncompatible_grids
7051 REAL(KIND=dp) :: my_alpha, my_beta
7052
7053 CALL timeset(routinen, handle)
7054
7055 my_alpha = 1.0_dp
7056 IF (PRESENT(alpha)) my_alpha = alpha
7057
7058 my_beta = 1.0_dp
7059 IF (PRESENT(beta)) my_beta = beta
7060
7061 my_allow_noncompatible_grids = .false.
7062 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
7063
7064 IF (my_beta /= 1.0_dp) THEN
7065 IF (my_beta == 0.0_dp) THEN
7066 CALL pw_zero(pw2)
7067 ELSE
7068!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
7069 pw2%array = pw2%array*my_beta
7070!$OMP END PARALLEL WORKSHARE
7071 END IF
7072 END IF
7073
7074 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7075 IF (my_alpha == 1.0_dp) THEN
7076!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
7077 pw2%array = pw2%array + real(pw1%array, kind=dp)
7078!$OMP END PARALLEL WORKSHARE
7079 ELSE IF (my_alpha /= 0.0_dp) THEN
7080!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
7081 pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
7082!$OMP END PARALLEL WORKSHARE
7083 END IF
7084
7085 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
7086
7087 IF (any(shape(pw1%array) /= shape(pw2%array))) &
7088 cpabort("Noncommensurate grids not implemented for 3D grids!")
7089
7090 IF (my_alpha == 1.0_dp) THEN
7091!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7092 pw2%array = pw2%array + real(pw1%array, kind=dp)
7093!$OMP END PARALLEL WORKSHARE
7094 ELSE
7095!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
7096 pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
7097!$OMP END PARALLEL WORKSHARE
7098 END IF
7099
7100 ELSE
7101
7102 cpabort("Grids not compatible")
7103
7104 END IF
7105
7106 CALL timestop(handle)
7107
7108 END SUBROUTINE pw_axpy_c3d_r3d_rs
7109
7110! **************************************************************************************************
7111!> \brief pw_out = pw_out + alpha * pw1 * pw2
7112!> alpha defaults to 1
7113!> \param pw_out ...
7114!> \param pw1 ...
7115!> \param pw2 ...
7116!> \param alpha ...
7117!> \author JGH
7118! **************************************************************************************************
7119 SUBROUTINE pw_multiply_c3d_r3d_rs (pw_out, pw1, pw2, alpha)
7120
7121 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw_out
7122 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7123 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
7124 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
7125
7126 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
7127
7128 INTEGER :: handle
7129 REAL(KIND=dp) :: my_alpha
7130
7131 CALL timeset(routinen, handle)
7132
7133 my_alpha = 1.0_dp
7134 IF (PRESENT(alpha)) my_alpha = alpha
7135
7136 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
7137 cpabort("pw_multiply not implemented for non-identical grids!")
7138
7139#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
7140 IF (my_alpha == 1.0_dp) THEN
7141!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
7142 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7143!$OMP END PARALLEL WORKSHARE
7144 ELSE
7145!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
7146 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7147!$OMP END PARALLEL WORKSHARE
7148 END IF
7149#else
7150 IF (my_alpha == 1.0_dp) THEN
7151 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7152 ELSE
7153 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7154 END IF
7155#endif
7156
7157 CALL timestop(handle)
7158
7159 END SUBROUTINE pw_multiply_c3d_r3d_rs
7160
7161! **************************************************************************************************
7162!> \brief ...
7163!> \param pw1 ...
7164!> \param pw2 ...
7165! **************************************************************************************************
7166 SUBROUTINE pw_multiply_with_c3d_r3d_rs (pw1, pw2)
7167 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw1
7168 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
7169
7170 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7171
7172 INTEGER :: handle
7173
7174 CALL timeset(routinen, handle)
7175
7176 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7177 cpabort("Incompatible grids!")
7178
7179!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7180 pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7181!$OMP END PARALLEL WORKSHARE
7182
7183 CALL timestop(handle)
7184
7185 END SUBROUTINE pw_multiply_with_c3d_r3d_rs
7186
7187! **************************************************************************************************
7188!> \brief Calculate integral over unit cell for functions in plane wave basis
7189!> only returns the real part of it ......
7190!> \param pw1 ...
7191!> \param pw2 ...
7192!> \param sumtype ...
7193!> \param just_sum ...
7194!> \param local_only ...
7195!> \return ...
7196!> \par History
7197!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7198!> \author apsi
7199! **************************************************************************************************
7200 FUNCTION pw_integral_ab_c3d_r3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
7201
7202 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7203 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
7204 INTEGER, INTENT(IN), OPTIONAL :: sumtype
7205 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
7206 REAL(kind=dp) :: integral_value
7207
7208 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_r3d_rs'
7209
7210 INTEGER :: handle, loc_sumtype
7211 LOGICAL :: my_just_sum, my_local_only
7212
7213 CALL timeset(routinen, handle)
7214
7215 loc_sumtype = do_accurate_sum
7216 IF (PRESENT(sumtype)) loc_sumtype = sumtype
7217
7218 my_local_only = .false.
7219 IF (PRESENT(local_only)) my_local_only = local_only
7220
7221 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7222 cpabort("Grids incompatible")
7223 END IF
7224
7225 my_just_sum = .false.
7226 IF (PRESENT(just_sum)) my_just_sum = just_sum
7227
7228 ! do standard sum
7229 IF (loc_sumtype == do_standard_sum) THEN
7230
7231 ! Do standard sum
7232
7233 integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit
7234
7235 ELSE
7236
7237 ! Do accurate sum
7238 integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit
7239
7240 END IF
7241
7242 IF (.NOT. my_just_sum) THEN
7243 integral_value = integral_value*pw1%pw_grid%dvol
7244 END IF
7245
7246
7247 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
7248 CALL pw1%pw_grid%para%group%sum(integral_value)
7249
7250 CALL timestop(handle)
7251
7252 END FUNCTION pw_integral_ab_c3d_r3d_rs
7253! **************************************************************************************************
7254!> \brief copy a pw type variable
7255!> \param pw1 ...
7256!> \param pw2 ...
7257!> \par History
7258!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
7259!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
7260!> JGH (21-Feb-2003) : Code for generalized reference grids
7261!> \author apsi
7262!> \note
7263!> Currently only copying of respective types allowed,
7264!> in order to avoid errors
7265! **************************************************************************************************
7266 SUBROUTINE pw_copy_c3d_r3d_gs (pw1, pw2)
7267
7268 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7269 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
7270
7271 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
7272
7273 INTEGER :: handle
7274
7275 CALL timeset(routinen, handle)
7276
7277 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
7278 cpabort("Both grids must be either spherical or non-spherical!")
7279
7280 IF (any(shape(pw2%array) /= shape(pw1%array))) &
7281 cpabort("3D grids must be compatible!")
7282 IF (pw1%pw_grid%spherical) &
7283 cpabort("3D grids must not be spherical!")
7284!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7285 pw2%array(:, :, :) = real(pw1%array(:, :, :), kind=dp)
7286!$OMP END PARALLEL WORKSHARE
7287
7288 CALL timestop(handle)
7289
7290 END SUBROUTINE pw_copy_c3d_r3d_gs
7291
7292! **************************************************************************************************
7293!> \brief ...
7294!> \param pw ...
7295!> \param array ...
7296! **************************************************************************************************
7297 SUBROUTINE pw_copy_to_array_c3d_r3d_gs (pw, array)
7298 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7299 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
7300
7301 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
7302
7303 INTEGER :: handle
7304
7305 CALL timeset(routinen, handle)
7306
7307!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7308 array(:, :, :) = real(pw%array(:, :, :), kind=dp)
7309!$OMP END PARALLEL WORKSHARE
7310
7311 CALL timestop(handle)
7312 END SUBROUTINE pw_copy_to_array_c3d_r3d_gs
7313
7314! **************************************************************************************************
7315!> \brief ...
7316!> \param pw ...
7317!> \param array ...
7318! **************************************************************************************************
7319 SUBROUTINE pw_copy_from_array_c3d_r3d_gs (pw, array)
7320 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7321 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7322
7323 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7324
7325 INTEGER :: handle
7326
7327 CALL timeset(routinen, handle)
7328
7329!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7330 pw%array = cmplx(array, 0.0_dp, kind=dp)
7331!$OMP END PARALLEL WORKSHARE
7332
7333 CALL timestop(handle)
7334 END SUBROUTINE pw_copy_from_array_c3d_r3d_gs
7335
7336! **************************************************************************************************
7337!> \brief pw2 = alpha*pw1 + beta*pw2
7338!> alpha defaults to 1, beta defaults to 1
7339!> \param pw1 ...
7340!> \param pw2 ...
7341!> \param alpha ...
7342!> \param beta ...
7343!> \param allow_noncompatible_grids ...
7344!> \par History
7345!> JGH (21-Feb-2003) : added reference grid functionality
7346!> JGH (01-Dec-2007) : rename and remove complex alpha
7347!> \author apsi
7348!> \note
7349!> Currently only summing up of respective types allowed,
7350!> in order to avoid errors
7351! **************************************************************************************************
7352 SUBROUTINE pw_axpy_c3d_r3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
7353
7354 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7355 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
7356 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
7357 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
7358
7359 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
7360
7361 INTEGER :: handle
7362 LOGICAL :: my_allow_noncompatible_grids
7363 REAL(KIND=dp) :: my_alpha, my_beta
7364
7365 CALL timeset(routinen, handle)
7366
7367 my_alpha = 1.0_dp
7368 IF (PRESENT(alpha)) my_alpha = alpha
7369
7370 my_beta = 1.0_dp
7371 IF (PRESENT(beta)) my_beta = beta
7372
7373 my_allow_noncompatible_grids = .false.
7374 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
7375
7376 IF (my_beta /= 1.0_dp) THEN
7377 IF (my_beta == 0.0_dp) THEN
7378 CALL pw_zero(pw2)
7379 ELSE
7380!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
7381 pw2%array = pw2%array*my_beta
7382!$OMP END PARALLEL WORKSHARE
7383 END IF
7384 END IF
7385
7386 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7387 IF (my_alpha == 1.0_dp) THEN
7388!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
7389 pw2%array = pw2%array + real(pw1%array, kind=dp)
7390!$OMP END PARALLEL WORKSHARE
7391 ELSE IF (my_alpha /= 0.0_dp) THEN
7392!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
7393 pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
7394!$OMP END PARALLEL WORKSHARE
7395 END IF
7396
7397 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
7398
7399 IF (any(shape(pw1%array) /= shape(pw2%array))) &
7400 cpabort("Noncommensurate grids not implemented for 3D grids!")
7401
7402 IF (my_alpha == 1.0_dp) THEN
7403!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7404 pw2%array = pw2%array + real(pw1%array, kind=dp)
7405!$OMP END PARALLEL WORKSHARE
7406 ELSE
7407!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
7408 pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
7409!$OMP END PARALLEL WORKSHARE
7410 END IF
7411
7412 ELSE
7413
7414 cpabort("Grids not compatible")
7415
7416 END IF
7417
7418 CALL timestop(handle)
7419
7420 END SUBROUTINE pw_axpy_c3d_r3d_gs
7421
7422! **************************************************************************************************
7423!> \brief pw_out = pw_out + alpha * pw1 * pw2
7424!> alpha defaults to 1
7425!> \param pw_out ...
7426!> \param pw1 ...
7427!> \param pw2 ...
7428!> \param alpha ...
7429!> \author JGH
7430! **************************************************************************************************
7431 SUBROUTINE pw_multiply_c3d_r3d_gs (pw_out, pw1, pw2, alpha)
7432
7433 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw_out
7434 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7435 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
7436 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
7437
7438 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
7439
7440 INTEGER :: handle
7441 REAL(KIND=dp) :: my_alpha
7442
7443 CALL timeset(routinen, handle)
7444
7445 my_alpha = 1.0_dp
7446 IF (PRESENT(alpha)) my_alpha = alpha
7447
7448 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
7449 cpabort("pw_multiply not implemented for non-identical grids!")
7450
7451#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
7452 IF (my_alpha == 1.0_dp) THEN
7453!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
7454 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7455!$OMP END PARALLEL WORKSHARE
7456 ELSE
7457!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
7458 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7459!$OMP END PARALLEL WORKSHARE
7460 END IF
7461#else
7462 IF (my_alpha == 1.0_dp) THEN
7463 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7464 ELSE
7465 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7466 END IF
7467#endif
7468
7469 CALL timestop(handle)
7470
7471 END SUBROUTINE pw_multiply_c3d_r3d_gs
7472
7473! **************************************************************************************************
7474!> \brief ...
7475!> \param pw1 ...
7476!> \param pw2 ...
7477! **************************************************************************************************
7478 SUBROUTINE pw_multiply_with_c3d_r3d_gs (pw1, pw2)
7479 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw1
7480 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
7481
7482 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7483
7484 INTEGER :: handle
7485
7486 CALL timeset(routinen, handle)
7487
7488 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7489 cpabort("Incompatible grids!")
7490
7491!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7492 pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7493!$OMP END PARALLEL WORKSHARE
7494
7495 CALL timestop(handle)
7496
7497 END SUBROUTINE pw_multiply_with_c3d_r3d_gs
7498
7499! **************************************************************************************************
7500!> \brief Calculate integral over unit cell for functions in plane wave basis
7501!> only returns the real part of it ......
7502!> \param pw1 ...
7503!> \param pw2 ...
7504!> \param sumtype ...
7505!> \param just_sum ...
7506!> \param local_only ...
7507!> \return ...
7508!> \par History
7509!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7510!> \author apsi
7511! **************************************************************************************************
7512 FUNCTION pw_integral_ab_c3d_r3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
7513
7514 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7515 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
7516 INTEGER, INTENT(IN), OPTIONAL :: sumtype
7517 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
7518 REAL(kind=dp) :: integral_value
7519
7520 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_r3d_gs'
7521
7522 INTEGER :: handle, loc_sumtype
7523 LOGICAL :: my_just_sum, my_local_only
7524
7525 CALL timeset(routinen, handle)
7526
7527 loc_sumtype = do_accurate_sum
7528 IF (PRESENT(sumtype)) loc_sumtype = sumtype
7529
7530 my_local_only = .false.
7531 IF (PRESENT(local_only)) my_local_only = local_only
7532
7533 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7534 cpabort("Grids incompatible")
7535 END IF
7536
7537 my_just_sum = .false.
7538 IF (PRESENT(just_sum)) my_just_sum = just_sum
7539
7540 ! do standard sum
7541 IF (loc_sumtype == do_standard_sum) THEN
7542
7543 ! Do standard sum
7544
7545 integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit
7546
7547 ELSE
7548
7549 ! Do accurate sum
7550 integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit
7551
7552 END IF
7553
7554 IF (.NOT. my_just_sum) THEN
7555 integral_value = integral_value*pw1%pw_grid%vol
7556 END IF
7557
7558
7559 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
7560 CALL pw1%pw_grid%para%group%sum(integral_value)
7561
7562 CALL timestop(handle)
7563
7564 END FUNCTION pw_integral_ab_c3d_r3d_gs
7565
7566! **************************************************************************************************
7567!> \brief copy a pw type variable
7568!> \param pw1 ...
7569!> \param pw2 ...
7570!> \par History
7571!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
7572!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
7573!> JGH (21-Feb-2003) : Code for generalized reference grids
7574!> \author apsi
7575!> \note
7576!> Currently only copying of respective types allowed,
7577!> in order to avoid errors
7578! **************************************************************************************************
7579 SUBROUTINE pw_copy_c3d_c3d_rs (pw1, pw2)
7580
7581 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7582 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
7583
7584 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
7585
7586 INTEGER :: handle
7587
7588 CALL timeset(routinen, handle)
7589
7590 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
7591 cpabort("Both grids must be either spherical or non-spherical!")
7592 IF (pw1%pw_grid%spherical) &
7593 cpabort("Spherical grids only exist in reciprocal space!")
7594
7595 IF (any(shape(pw2%array) /= shape(pw1%array))) &
7596 cpabort("3D grids must be compatible!")
7597 IF (pw1%pw_grid%spherical) &
7598 cpabort("3D grids must not be spherical!")
7599!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7600 pw2%array(:, :, :) = pw1%array(:, :, :)
7601!$OMP END PARALLEL WORKSHARE
7602
7603 CALL timestop(handle)
7604
7605 END SUBROUTINE pw_copy_c3d_c3d_rs
7606
7607! **************************************************************************************************
7608!> \brief ...
7609!> \param pw ...
7610!> \param array ...
7611! **************************************************************************************************
7612 SUBROUTINE pw_copy_to_array_c3d_c3d_rs (pw, array)
7613 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
7614 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
7615
7616 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
7617
7618 INTEGER :: handle
7619
7620 CALL timeset(routinen, handle)
7621
7622!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7623 array(:, :, :) = pw%array(:, :, :)
7624!$OMP END PARALLEL WORKSHARE
7625
7626 CALL timestop(handle)
7627 END SUBROUTINE pw_copy_to_array_c3d_c3d_rs
7628
7629! **************************************************************************************************
7630!> \brief ...
7631!> \param pw ...
7632!> \param array ...
7633! **************************************************************************************************
7634 SUBROUTINE pw_copy_from_array_c3d_c3d_rs (pw, array)
7635 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
7636 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7637
7638 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7639
7640 INTEGER :: handle
7641
7642 CALL timeset(routinen, handle)
7643
7644!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7645 pw%array = array
7646!$OMP END PARALLEL WORKSHARE
7647
7648 CALL timestop(handle)
7649 END SUBROUTINE pw_copy_from_array_c3d_c3d_rs
7650
7651! **************************************************************************************************
7652!> \brief pw2 = alpha*pw1 + beta*pw2
7653!> alpha defaults to 1, beta defaults to 1
7654!> \param pw1 ...
7655!> \param pw2 ...
7656!> \param alpha ...
7657!> \param beta ...
7658!> \param allow_noncompatible_grids ...
7659!> \par History
7660!> JGH (21-Feb-2003) : added reference grid functionality
7661!> JGH (01-Dec-2007) : rename and remove complex alpha
7662!> \author apsi
7663!> \note
7664!> Currently only summing up of respective types allowed,
7665!> in order to avoid errors
7666! **************************************************************************************************
7667 SUBROUTINE pw_axpy_c3d_c3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
7668
7669 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7670 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
7671 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
7672 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
7673
7674 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
7675
7676 INTEGER :: handle
7677 LOGICAL :: my_allow_noncompatible_grids
7678 REAL(KIND=dp) :: my_alpha, my_beta
7679
7680 CALL timeset(routinen, handle)
7681
7682 my_alpha = 1.0_dp
7683 IF (PRESENT(alpha)) my_alpha = alpha
7684
7685 my_beta = 1.0_dp
7686 IF (PRESENT(beta)) my_beta = beta
7687
7688 my_allow_noncompatible_grids = .false.
7689 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
7690
7691 IF (my_beta /= 1.0_dp) THEN
7692 IF (my_beta == 0.0_dp) THEN
7693 CALL pw_zero(pw2)
7694 ELSE
7695!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
7696 pw2%array = pw2%array*my_beta
7697!$OMP END PARALLEL WORKSHARE
7698 END IF
7699 END IF
7700
7701 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7702 IF (my_alpha == 1.0_dp) THEN
7703!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
7704 pw2%array = pw2%array + pw1%array
7705!$OMP END PARALLEL WORKSHARE
7706 ELSE IF (my_alpha /= 0.0_dp) THEN
7707!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
7708 pw2%array = pw2%array + my_alpha* pw1%array
7709!$OMP END PARALLEL WORKSHARE
7710 END IF
7711
7712 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
7713
7714 IF (any(shape(pw1%array) /= shape(pw2%array))) &
7715 cpabort("Noncommensurate grids not implemented for 3D grids!")
7716
7717 IF (my_alpha == 1.0_dp) THEN
7718!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7719 pw2%array = pw2%array + pw1%array
7720!$OMP END PARALLEL WORKSHARE
7721 ELSE
7722!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
7723 pw2%array = pw2%array + my_alpha* pw1%array
7724!$OMP END PARALLEL WORKSHARE
7725 END IF
7726
7727 ELSE
7728
7729 cpabort("Grids not compatible")
7730
7731 END IF
7732
7733 CALL timestop(handle)
7734
7735 END SUBROUTINE pw_axpy_c3d_c3d_rs
7736
7737! **************************************************************************************************
7738!> \brief pw_out = pw_out + alpha * pw1 * pw2
7739!> alpha defaults to 1
7740!> \param pw_out ...
7741!> \param pw1 ...
7742!> \param pw2 ...
7743!> \param alpha ...
7744!> \author JGH
7745! **************************************************************************************************
   SUBROUTINE pw_multiply_c3d_c3d_rs (pw_out, pw1, pw2, alpha)

      ! Accumulate an element-wise product: pw_out = pw_out + alpha*pw1*pw2.
      ! All three fields must refer to the identical (associated) grid object.
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha ! scaling factor, defaults to 1

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! NOTE(review): the guard below presumably works around an issue with
      ! OMP WORKSHARE on Intel LLVM compilers older than 2025.0 — confirm.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      ! Serial fallback (no OMP WORKSHARE) for the excluded compilers.
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* pw2%array
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c3d_c3d_rs
7787
7788! **************************************************************************************************
7789!> \brief ...
7790!> \param pw1 ...
7791!> \param pw2 ...
7792! **************************************************************************************************
7793 SUBROUTINE pw_multiply_with_c3d_c3d_rs (pw1, pw2)
7794 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw1
7795 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
7796
7797 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7798
7799 INTEGER :: handle
7800
7801 CALL timeset(routinen, handle)
7802
7803 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7804 cpabort("Incompatible grids!")
7805
7806!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7807 pw1%array = pw1%array* pw2%array
7808!$OMP END PARALLEL WORKSHARE
7809
7810 CALL timestop(handle)
7811
7812 END SUBROUTINE pw_multiply_with_c3d_c3d_rs
7813
7814! **************************************************************************************************
7815!> \brief Calculate integral over unit cell for functions in plane wave basis
7816!> only returns the real part of it ......
7817!> \param pw1 ...
7818!> \param pw2 ...
7819!> \param sumtype ...
7820!> \param just_sum ...
7821!> \param local_only ...
7822!> \return ...
7823!> \par History
7824!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7825!> \author apsi
7826! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_c3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Integral of conjg(pw1)*pw2 over the unit cell (real part only).
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum ! if true, skip the volume-element factor
      LOGICAL, INTENT(IN), OPTIONAL :: local_only ! if true, skip the parallel reduction
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_c3d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Resolve optional arguments to their defaults.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum (compensated summation for reduced round-off)
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      ! Convert the raw grid-point sum into an integral by multiplying with
      ! the grid's volume element dvol (real-space quadrature weight).
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF


      ! Reduce over all ranks when the grid is distributed (unless suppressed).
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_c3d_rs
7881! **************************************************************************************************
7882!> \brief copy a pw type variable
7883!> \param pw1 ...
7884!> \param pw2 ...
7885!> \par History
7886!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
7887!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
7888!> JGH (21-Feb-2003) : Code for generalized reference grids
7889!> \author apsi
7890!> \note
7891!> Currently only copying of respective types allowed,
7892!> in order to avoid errors
7893! **************************************************************************************************
7894 SUBROUTINE pw_copy_c3d_c3d_gs (pw1, pw2)
7895
7896 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7897 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
7898
7899 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
7900
7901 INTEGER :: handle
7902
7903 CALL timeset(routinen, handle)
7904
7905 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
7906 cpabort("Both grids must be either spherical or non-spherical!")
7907
7908 IF (any(shape(pw2%array) /= shape(pw1%array))) &
7909 cpabort("3D grids must be compatible!")
7910 IF (pw1%pw_grid%spherical) &
7911 cpabort("3D grids must not be spherical!")
7912!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7913 pw2%array(:, :, :) = pw1%array(:, :, :)
7914!$OMP END PARALLEL WORKSHARE
7915
7916 CALL timestop(handle)
7917
7918 END SUBROUTINE pw_copy_c3d_c3d_gs
7919
7920! **************************************************************************************************
7921!> \brief ...
7922!> \param pw ...
7923!> \param array ...
7924! **************************************************************************************************
7925 SUBROUTINE pw_copy_to_array_c3d_c3d_gs (pw, array)
7926 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7927 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
7928
7929 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
7930
7931 INTEGER :: handle
7932
7933 CALL timeset(routinen, handle)
7934
7935!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7936 array(:, :, :) = pw%array(:, :, :)
7937!$OMP END PARALLEL WORKSHARE
7938
7939 CALL timestop(handle)
7940 END SUBROUTINE pw_copy_to_array_c3d_c3d_gs
7941
7942! **************************************************************************************************
7943!> \brief ...
7944!> \param pw ...
7945!> \param array ...
7946! **************************************************************************************************
7947 SUBROUTINE pw_copy_from_array_c3d_c3d_gs (pw, array)
7948 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7949 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7950
7951 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7952
7953 INTEGER :: handle
7954
7955 CALL timeset(routinen, handle)
7956
7957!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7958 pw%array = array
7959!$OMP END PARALLEL WORKSHARE
7960
7961 CALL timestop(handle)
7962 END SUBROUTINE pw_copy_from_array_c3d_c3d_gs
7963
7964! **************************************************************************************************
7965!> \brief pw2 = alpha*pw1 + beta*pw2
7966!> alpha defaults to 1, beta defaults to 1
7967!> \param pw1 ...
7968!> \param pw2 ...
7969!> \param alpha ...
7970!> \param beta ...
7971!> \param allow_noncompatible_grids ...
7972!> \par History
7973!> JGH (21-Feb-2003) : added reference grid functionality
7974!> JGH (01-Dec-2007) : rename and remove complex alpha
7975!> \author apsi
7976!> \note
7977!> Currently only summing up of respective types allowed,
7978!> in order to avoid errors
7979! **************************************************************************************************
7980 SUBROUTINE pw_axpy_c3d_c3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
7981
7982 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7983 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
7984 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
7985 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
7986
7987 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
7988
7989 INTEGER :: handle
7990 LOGICAL :: my_allow_noncompatible_grids
7991 REAL(KIND=dp) :: my_alpha, my_beta
7992
7993 CALL timeset(routinen, handle)
7994
7995 my_alpha = 1.0_dp
7996 IF (PRESENT(alpha)) my_alpha = alpha
7997
7998 my_beta = 1.0_dp
7999 IF (PRESENT(beta)) my_beta = beta
8000
8001 my_allow_noncompatible_grids = .false.
8002 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
8003
8004 IF (my_beta /= 1.0_dp) THEN
8005 IF (my_beta == 0.0_dp) THEN
8006 CALL pw_zero(pw2)
8007 ELSE
8008!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
8009 pw2%array = pw2%array*my_beta
8010!$OMP END PARALLEL WORKSHARE
8011 END IF
8012 END IF
8013
8014 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
8015 IF (my_alpha == 1.0_dp) THEN
8016!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
8017 pw2%array = pw2%array + pw1%array
8018!$OMP END PARALLEL WORKSHARE
8019 ELSE IF (my_alpha /= 0.0_dp) THEN
8020!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
8021 pw2%array = pw2%array + my_alpha* pw1%array
8022!$OMP END PARALLEL WORKSHARE
8023 END IF
8024
8025 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
8026
8027 IF (any(shape(pw1%array) /= shape(pw2%array))) &
8028 cpabort("Noncommensurate grids not implemented for 3D grids!")
8029
8030 IF (my_alpha == 1.0_dp) THEN
8031!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
8032 pw2%array = pw2%array + pw1%array
8033!$OMP END PARALLEL WORKSHARE
8034 ELSE
8035!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
8036 pw2%array = pw2%array + my_alpha* pw1%array
8037!$OMP END PARALLEL WORKSHARE
8038 END IF
8039
8040 ELSE
8041
8042 cpabort("Grids not compatible")
8043
8044 END IF
8045
8046 CALL timestop(handle)
8047
8048 END SUBROUTINE pw_axpy_c3d_c3d_gs
8049
8050! **************************************************************************************************
8051!> \brief pw_out = pw_out + alpha * pw1 * pw2
8052!> alpha defaults to 1
8053!> \param pw_out ...
8054!> \param pw1 ...
8055!> \param pw2 ...
8056!> \param alpha ...
8057!> \author JGH
8058! **************************************************************************************************
   SUBROUTINE pw_multiply_c3d_c3d_gs (pw_out, pw1, pw2, alpha)

      ! Accumulate an element-wise product: pw_out = pw_out + alpha*pw1*pw2.
      ! All three fields must refer to the identical (associated) grid object.
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha ! scaling factor, defaults to 1

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! NOTE(review): the guard below presumably works around an issue with
      ! OMP WORKSHARE on Intel LLVM compilers older than 2025.0 — confirm.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      ! Serial fallback (no OMP WORKSHARE) for the excluded compilers.
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* pw2%array
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c3d_c3d_gs
8100
8101! **************************************************************************************************
8102!> \brief ...
8103!> \param pw1 ...
8104!> \param pw2 ...
8105! **************************************************************************************************
8106 SUBROUTINE pw_multiply_with_c3d_c3d_gs (pw1, pw2)
8107 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw1
8108 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
8109
8110 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
8111
8112 INTEGER :: handle
8113
8114 CALL timeset(routinen, handle)
8115
8116 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
8117 cpabort("Incompatible grids!")
8118
8119!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
8120 pw1%array = pw1%array* pw2%array
8121!$OMP END PARALLEL WORKSHARE
8122
8123 CALL timestop(handle)
8124
8125 END SUBROUTINE pw_multiply_with_c3d_c3d_gs
8126
8127! **************************************************************************************************
8128!> \brief Calculate integral over unit cell for functions in plane wave basis
8129!> only returns the real part of it ......
8130!> \param pw1 ...
8131!> \param pw2 ...
8132!> \param sumtype ...
8133!> \param just_sum ...
8134!> \param local_only ...
8135!> \return ...
8136!> \par History
8137!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
8138!> \author apsi
8139! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_c3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Integral of conjg(pw1)*pw2 over the unit cell (real part only),
      ! reciprocal-space variant.
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_c3d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Resolve optional arguments to their defaults.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum (compensated summation for reduced round-off)
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      ! Reciprocal-space sums pick up a factor of the cell volume here (the
      ! real-space variant uses dvol instead).
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF


      ! Reduce over all ranks when the grid is distributed (unless suppressed).
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_c3d_gs
8194
8195
8196
8197
8198
8199
8200
8201
8202
8203
8204
8205
8206
8207! **************************************************************************************************
8208!> \brief Gathers the pw vector from a 3d data field
!> \param pw1 the 3d data field that is gathered from
!> \param pw2 the 1d pw coefficient vector that is gathered into
!> \param scale optional factor applied to every gathered element
8212!> \par History
8213!> none
8214!> \author JGH
8215! **************************************************************************************************
8216 SUBROUTINE pw_gather_s_r1d_r3d_2(pw1, pw2, scale)
8217
8218 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
8219 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
8220 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8221
8222 CALL pw_gather_s_r1d_r3d (pw2, pw1%array, scale)
8223
8224 END SUBROUTINE pw_gather_s_r1d_r3d_2
8225
8226! **************************************************************************************************
8227!> \brief Gathers the pw vector from a 3d data field
8228!> \param pw ...
8229!> \param c ...
8230!> \param scale ...
8231!> \par History
8232!> none
8233!> \author JGH
8234! **************************************************************************************************
   SUBROUTINE pw_gather_s_r1d_r3d (pw, c, scale)

      ! Gather: pull the pw coefficients out of the 3d data cube c into the
      ! 1d vector pw%array, following the grid's g-vector index maps.
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale ! optional factor applied to every element

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the integer g-vector components stored in
      ! g_hat into 0-based cube positions; "+ 1" converts to Fortran's
      ! 1-based indexing. ngpts is the number of local g-vectors.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

      IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            pw%array(gpt) = scale* c(l, m, n)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            pw%array(gpt) = c(l, m, n)
         END DO
!$OMP END PARALLEL DO
      END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_r1d_r3d
8275
8276! **************************************************************************************************
8277!> \brief Scatters a pw vector to a 3d data field
!> \param pw1 the 1d pw coefficient vector that is scattered
!> \param pw2 the 3d data field that is scattered into
!> \param scale optional factor applied to every scattered element
8281!> \par History
8282!> none
8283!> \author JGH
8284! **************************************************************************************************
8285 SUBROUTINE pw_scatter_s_r1d_r3d_2(pw1, pw2, scale)
8286
8287 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
8288 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
8289 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8290
8291 CALL pw_scatter_s_r1d_r3d (pw1, pw2%array, scale)
8292
8293 END SUBROUTINE pw_scatter_s_r1d_r3d_2
8294
8295! **************************************************************************************************
8296!> \brief Scatters a pw vector to a 3d data field
8297!> \param pw ...
8298!> \param c ...
8299!> \param scale ...
8300!> \par History
8301!> none
8302!> \author JGH
8303! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_r3d (pw, c, scale)

      ! Scatter: place the pw coefficients of pw%array into the 3d data cube
      ! c, following the grid's g-vector index maps. For halfspace grids the
      ! coefficient of each g-vector is additionally written to the mapped
      ! -g position (second pass below).
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale ! optional factor applied to every element

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

      ! should only zero the unused bits (but the zero is needed)
      ! NOTE(review): c is only pre-zeroed when scale is absent — with scale
      ! present, untouched cube entries keep their previous contents.
      IF (.NOT. PRESENT(scale)) c = 0.0_dp

      IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            c(l, m, n) = scale* pw%array(gpt)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            c(l, m, n) = pw%array(gpt)
         END DO
!$OMP END PARALLEL DO
      END IF

      END associate

      ! Halfspace grids store only half of the g-vectors; replicate each
      ! coefficient at the position given by the negative-index maps.
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale*( pw%array(gpt))
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = ( pw%array(gpt))
            END DO
!$OMP END PARALLEL DO
         END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_r1d_r3d
8376
8377
8378
8379
8380
8381
8382
8383
8384
8385
8386
8387! **************************************************************************************************
8388!> \brief Gathers the pw vector from a 3d data field
!> \param pw1 the 3d data field that is gathered from
!> \param pw2 the 1d pw coefficient vector that is gathered into
!> \param scale optional factor applied to every gathered element
8392!> \par History
8393!> none
8394!> \author JGH
8395! **************************************************************************************************
8396 SUBROUTINE pw_gather_s_r1d_c3d_2(pw1, pw2, scale)
8397
8398 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
8399 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
8400 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8401
8402 CALL pw_gather_s_r1d_c3d (pw2, pw1%array, scale)
8403
8404 END SUBROUTINE pw_gather_s_r1d_c3d_2
8405
8406! **************************************************************************************************
8407!> \brief Gathers the pw vector from a 3d data field
8408!> \param pw ...
8409!> \param c ...
8410!> \param scale ...
8411!> \par History
8412!> none
8413!> \author JGH
8414! **************************************************************************************************
   SUBROUTINE pw_gather_s_r1d_c3d (pw, c, scale)

      ! Gather: pull the pw coefficients out of the complex 3d data cube c
      ! into the real 1d vector pw%array, following the grid's g-vector
      ! index maps. Only the real part of each cube entry is kept; the
      ! imaginary part is discarded.
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale ! optional factor applied to every element

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the integer g-vector components stored in
      ! g_hat into 0-based cube positions; "+ 1" converts to Fortran's
      ! 1-based indexing. ngpts is the number of local g-vectors.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

      IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            pw%array(gpt) = scale* real(c(l, m, n), kind=dp)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            pw%array(gpt) = real(c(l, m, n), kind=dp)
         END DO
!$OMP END PARALLEL DO
      END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_r1d_c3d
8455
8456! **************************************************************************************************
8457!> \brief Scatters a pw vector to a 3d data field
!> \param pw1 the 1d pw coefficient vector that is scattered
!> \param pw2 the 3d data field that is scattered into
!> \param scale optional factor applied to every scattered element
8461!> \par History
8462!> none
8463!> \author JGH
8464! **************************************************************************************************
8465 SUBROUTINE pw_scatter_s_r1d_c3d_2(pw1, pw2, scale)
8466
8467 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
8468 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
8469 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8470
8471 CALL pw_scatter_s_r1d_c3d (pw1, pw2%array, scale)
8472
8473 END SUBROUTINE pw_scatter_s_r1d_c3d_2
8474
8475! **************************************************************************************************
8476!> \brief Scatters a pw vector to a 3d data field
8477!> \param pw ...
8478!> \param c ...
8479!> \param scale ...
8480!> \par History
8481!> none
8482!> \author JGH
8483! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_c3d (pw, c, scale)

      ! Scatter: place the real pw coefficients of pw%array into the complex
      ! 3d data cube c (imaginary part set to zero), following the grid's
      ! g-vector index maps. For halfspace grids each coefficient is also
      ! written to the mapped -g position (second pass below).
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale ! optional factor applied to every element

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

      ! should only zero the unused bits (but the zero is needed)
      ! NOTE(review): c is only pre-zeroed when scale is absent — with scale
      ! present, untouched cube entries keep their previous contents.
      IF (.NOT. PRESENT(scale)) c = 0.0_dp

      IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            c(l, m, n) = scale* cmplx(pw%array(gpt), 0.0_dp, kind=dp)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
         DO gpt = 1, ngpts
            l = mapl(ghat(1, gpt)) + 1
            m = mapm(ghat(2, gpt)) + 1
            n = mapn(ghat(3, gpt)) + 1
            c(l, m, n) = cmplx(pw%array(gpt), 0.0_dp, kind=dp)
         END DO
!$OMP END PARALLEL DO
      END IF

      END associate

      ! Halfspace grids store only half of the g-vectors; write the same
      ! (real-valued) coefficient at the position given by the negative-index
      ! maps as well.
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale*( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = ( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
            END DO
!$OMP END PARALLEL DO
         END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_r1d_c3d
8556
8557
8558
8559
8560
8561
8562
8563
8564
8565
8566
8567
8568! **************************************************************************************************
8569!> \brief Generic function for 3d FFT of a coefficient_type or pw_r3d_rs_type
8570!> \param pw1 ...
8571!> \param pw2 ...
8572!> \param debug ...
8573!> \par History
8574!> JGH (30-12-2000): New setup of functions and adaptation to parallelism
8575!> JGH (04-01-2001): Moved routine from pws to this module, only covers
8576!> pw_types, no more coefficient types
8577!> \author apsi
8578!> \note
8579!> fft_wrap_pw1pw2
8580! **************************************************************************************************
8581 SUBROUTINE fft_wrap_pw1pw2_r3d_c1d_rs_gs (pw1, pw2, debug)
8582
8583 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
8584 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
8585 LOGICAL, INTENT(IN), OPTIONAL :: debug
8586
8587 CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'
8588
8589 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
8590 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
8591 INTEGER :: handle, handle2, my_pos, nrays, &
8592 out_unit
8593 INTEGER, DIMENSION(3) :: nloc
8594#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8595 LOGICAL :: use_pw_gpu
8596#endif
8597 INTEGER, DIMENSION(:), POINTER :: n
8598 LOGICAL :: test
8599
8600 CALL timeset(routinen, handle2)
8601 out_unit = cp_logger_get_default_io_unit()
8602 CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
8603 ceiling(pw1%pw_grid%cutoff/10)*10))), handle)
8604
8605 NULLIFY (c_in)
8606 NULLIFY (c_out)
8607
8608 IF (PRESENT(debug)) THEN
8609 test = debug
8610 ELSE
8611 test = .false.
8612 END IF
8613
8614 !..check if grids are compatible
8615 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
8616 IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
8617 cpabort("PW grids not compatible")
8618 END IF
8619 IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
8620 cpabort("PW grids have not compatible MPI groups")
8621 END IF
8622 END IF
8623
8624 n => pw1%pw_grid%npts
8625
8626 IF (pw1%pw_grid%para%mode == pw_mode_local) THEN
8627
8628 !
8629 !..replicated data, use local FFT
8630 !
8631
8632 IF (test .AND. out_unit > 0) THEN
8633 WRITE (out_unit, '(A)') " FFT Protocol "
8634 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
8635 WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
8636 WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
8637 END IF
8638
8639#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8640 CALL pw_gpu_r3dc1d_3d(pw1, pw2)
8641#elif defined (__PW_FPGA)
8642 ALLOCATE (c_out(n(1), n(2), n(3)))
8643 ! check if bitstream for the fft size is present
8644 ! if not, perform fft3d in CPU
8645 IF (pw_fpga_init_bitstream(n) == 1) THEN
8646 CALL pw_copy_to_array(pw1, c_out)
8647#if (__PW_FPGA_SP && __PW_FPGA)
8648 CALL pw_fpga_r3dc1d_3d_sp(n, c_out)
8649#else
8650 CALL pw_fpga_r3dc1d_3d_dp(n, c_out)
8651#endif
8652 CALL zdscal(n(1)*n(2)*n(3), 1.0_dp/pw1%pw_grid%ngpts, c_out, 1)
8653 CALL pw_gather_s_c1d_c3d(pw2, c_out)
8654 ELSE
8655 CALL pw_copy_to_array(pw1, c_out)
8656 CALL fft3d(fwfft, n, c_out, debug=test)
8657 CALL pw_gather_s_c1d_c3d(pw2, c_out)
8658 END IF
8659 DEALLOCATE (c_out)
8660#else
8661 ALLOCATE (c_out(n(1), n(2), n(3)))
8662 c_out = 0.0_dp
8663 CALL pw_copy_to_array(pw1, c_out)
8664 CALL fft3d(fwfft, n, c_out, debug=test)
8665 CALL pw_gather_s_c1d_c3d(pw2, c_out)
8666 DEALLOCATE (c_out)
8667#endif
8668
8669 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "
8670
8671 ELSE
8672
8673 !
8674 !..parallel FFT
8675 !
8676
8677 IF (test .AND. out_unit > 0) THEN
8678 WRITE (out_unit, '(A)') " FFT Protocol "
8679 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
8680 WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
8681 WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
8682 END IF
8683
8684 my_pos = pw1%pw_grid%para%group%mepos
8685 nrays = pw1%pw_grid%para%nyzray(my_pos)
8686 grays => pw1%pw_grid%grays
8687
8688#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8689 ! (no ray dist. is not efficient in CUDA)
8690 use_pw_gpu = pw1%pw_grid%para%ray_distribution
8691 IF (use_pw_gpu) THEN
8692 CALL pw_gpu_r3dc1d_3d_ps(pw1, pw2)
8693 ELSE
8694#endif
8695!.. prepare input
8696 nloc = pw1%pw_grid%npts_local
8697 ALLOCATE (c_in(nloc(1), nloc(2), nloc(3)))
8698 CALL pw_copy_to_array(pw1, c_in)
8699 grays = z_zero
8700 !..transform
8701 IF (pw1%pw_grid%para%ray_distribution) THEN
8702 CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
8703 pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
8704 pw1%pw_grid%para%bo, debug=test)
8705 ELSE
8706 CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
8707 pw1%pw_grid%para%bo, debug=test)
8708 END IF
8709 !..prepare output
8710 IF (test .AND. out_unit > 0) &
8711 WRITE (out_unit, '(A)') " PW_GATHER : 2d -> 1d "
8712 CALL pw_gather_p_c1d (pw2, grays)
8713 DEALLOCATE (c_in)
8714
8715#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8716 END IF
8717#endif
8718 END IF
8719
8720 IF (test .AND. out_unit > 0) THEN
8721 WRITE (out_unit, '(A)') " End of FFT Protocol "
8722 END IF
8723
8724 CALL timestop(handle)
8725 CALL timestop(handle2)
8726
8727 END SUBROUTINE fft_wrap_pw1pw2_r3d_c1d_rs_gs
8728
8729
8730
8731
8732
8733! **************************************************************************************************
8734!> \brief Generic function for 3d FFT of a coefficient_type or pw_r3d_rs_type
8735!> \param pw1 ...
8736!> \param pw2 ...
8737!> \param debug ...
8738!> \par History
8739!> JGH (30-12-2000): New setup of functions and adaptation to parallelism
8740!> JGH (04-01-2001): Moved routine from pws to this module, only covers
8741!> pw_types, no more coefficient types
8742!> \author apsi
8743!> \note
8744!> fft_wrap_pw1pw2
8745! **************************************************************************************************
8746 SUBROUTINE fft_wrap_pw1pw2_r3d_c3d_rs_gs (pw1, pw2, debug)
8747
8748 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
8749 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
8750 LOGICAL, INTENT(IN), OPTIONAL :: debug
8751
8752 CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'
8753
8754 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
8755 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
8756 INTEGER :: handle, handle2, my_pos, nrays, &
8757 out_unit
8758 INTEGER, DIMENSION(:), POINTER :: n
8759 LOGICAL :: test
8760
8761 CALL timeset(routinen, handle2)
8762 out_unit = cp_logger_get_default_io_unit()
8763 CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
8764 ceiling(pw1%pw_grid%cutoff/10)*10))), handle)
8765
8766 NULLIFY (c_in)
8767 NULLIFY (c_out)
8768
8769 IF (PRESENT(debug)) THEN
8770 test = debug
8771 ELSE
8772 test = .false.
8773 END IF
8774
8775 !..check if grids are compatible
8776 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
8777 IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
8778 cpabort("PW grids not compatible")
8779 END IF
8780 IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
8781 cpabort("PW grids have not compatible MPI groups")
8782 END IF
8783 END IF
8784
8785 n => pw1%pw_grid%npts
8786
8787 IF (pw1%pw_grid%para%mode == pw_mode_local) THEN
8788
8789 !
8790 !..replicated data, use local FFT
8791 !
8792
8793 IF (test .AND. out_unit > 0) THEN
8794 WRITE (out_unit, '(A)') " FFT Protocol "
8795 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
8796 WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
8797 WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
8798 END IF
8799
8800 pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
8801 c_out => pw2%array
8802 CALL fft3d(fwfft, n, c_out, debug=test)
8803
8804 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "
8805
8806 ELSE
8807
8808 !
8809 !..parallel FFT
8810 !
8811
8812 IF (test .AND. out_unit > 0) THEN
8813 WRITE (out_unit, '(A)') " FFT Protocol "
8814 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
8815 WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
8816 WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
8817 END IF
8818
8819 my_pos = pw1%pw_grid%para%group%mepos
8820 nrays = pw1%pw_grid%para%nyzray(my_pos)
8821 grays => pw1%pw_grid%grays
8822
8823 END IF
8824
8825 IF (test .AND. out_unit > 0) THEN
8826 WRITE (out_unit, '(A)') " End of FFT Protocol "
8827 END IF
8828
8829 CALL timestop(handle)
8830 CALL timestop(handle2)
8831
8832 END SUBROUTINE fft_wrap_pw1pw2_r3d_c3d_rs_gs
8833
8834
8835
8836
8837
8838
8839
8840
8841
8842
8843
8844! **************************************************************************************************
8845!> \brief Generic function for 3d FFT of a coefficient_type or pw_r3d_rs_type
8846!> \param pw1 ...
8847!> \param pw2 ...
8848!> \param debug ...
8849!> \par History
8850!> JGH (30-12-2000): New setup of functions and adaptation to parallelism
8851!> JGH (04-01-2001): Moved routine from pws to this module, only covers
8852!> pw_types, no more coefficient types
8853!> \author apsi
8854!> \note
8855!> fft_wrap_pw1pw2
8856! **************************************************************************************************
8857 SUBROUTINE fft_wrap_pw1pw2_c1d_r3d_gs_rs (pw1, pw2, debug)
8858
8859 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
8860 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
8861 LOGICAL, INTENT(IN), OPTIONAL :: debug
8862
8863 CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'
8864
8865 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
8866 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
8867 INTEGER :: handle, handle2, my_pos, nrays, &
8868 out_unit
8869 INTEGER, DIMENSION(3) :: nloc
8870#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8871 LOGICAL :: use_pw_gpu
8872#endif
8873 INTEGER, DIMENSION(:), POINTER :: n
8874 LOGICAL :: test
8875
8876 CALL timeset(routinen, handle2)
8877 out_unit = cp_logger_get_default_io_unit()
8878 CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
8879 ceiling(pw1%pw_grid%cutoff/10)*10))), handle)
8880
8881 NULLIFY (c_in)
8882 NULLIFY (c_out)
8883
8884 IF (PRESENT(debug)) THEN
8885 test = debug
8886 ELSE
8887 test = .false.
8888 END IF
8889
8890 !..check if grids are compatible
8891 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
8892 IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
8893 cpabort("PW grids not compatible")
8894 END IF
8895 IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
8896 cpabort("PW grids have not compatible MPI groups")
8897 END IF
8898 END IF
8899
8900 n => pw1%pw_grid%npts
8901
8902 IF (pw1%pw_grid%para%mode == pw_mode_local) THEN
8903
8904 !
8905 !..replicated data, use local FFT
8906 !
8907
8908 IF (test .AND. out_unit > 0) THEN
8909 WRITE (out_unit, '(A)') " FFT Protocol "
8910 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
8911 WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
8912 WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
8913 END IF
8914
8915#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8916 CALL pw_gpu_c1dr3d_3d(pw1, pw2)
8917#elif defined (__PW_FPGA)
8918 ALLOCATE (c_out(n(1), n(2), n(3)))
8919 ! check if bitstream for the fft size is present
8920 ! if not, perform fft3d in CPU
8921 IF (pw_fpga_init_bitstream(n) == 1) THEN
8922 CALL pw_scatter_s_c1d_c3d(pw1, c_out)
8923 ! transform using FPGA
8924#if (__PW_FPGA_SP && __PW_FPGA)
8925 CALL pw_fpga_c1dr3d_3d_sp(n, c_out)
8926#else
8927 CALL pw_fpga_c1dr3d_3d_dp(n, c_out)
8928#endif
8929 CALL zdscal(n(1)*n(2)*n(3), 1.0_dp, c_out, 1)
8930 ! use real part only
8931 CALL pw_copy_from_array(pw2, c_out)
8932 ELSE
8933 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
8934 CALL pw_scatter_s_c1d_c3d(pw1, c_out)
8935 ! transform
8936 CALL fft3d(bwfft, n, c_out, debug=test)
8937 ! use real part only
8938 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
8939 CALL pw_copy_from_array(pw2, c_out)
8940 END IF
8941 DEALLOCATE (c_out)
8942#else
8943 ALLOCATE (c_out(n(1), n(2), n(3)))
8944 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
8945 CALL pw_scatter_s_c1d_c3d(pw1, c_out)
8946 ! transform
8947 CALL fft3d(bwfft, n, c_out, debug=test)
8948 ! use real part only
8949 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
8950 CALL pw_copy_from_array(pw2, c_out)
8951 DEALLOCATE (c_out)
8952#endif
8953
8954 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "
8955
8956 ELSE
8957
8958 !
8959 !..parallel FFT
8960 !
8961
8962 IF (test .AND. out_unit > 0) THEN
8963 WRITE (out_unit, '(A)') " FFT Protocol "
8964 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
8965 WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
8966 WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
8967 END IF
8968
8969 my_pos = pw1%pw_grid%para%group%mepos
8970 nrays = pw1%pw_grid%para%nyzray(my_pos)
8971 grays => pw1%pw_grid%grays
8972
8973#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8974 ! (no ray dist. is not efficient in CUDA)
8975 use_pw_gpu = pw1%pw_grid%para%ray_distribution
8976 IF (use_pw_gpu) THEN
8977 CALL pw_gpu_c1dr3d_3d_ps(pw1, pw2)
8978 ELSE
8979#endif
8980!.. prepare input
8981 IF (test .AND. out_unit > 0) &
8982 WRITE (out_unit, '(A)') " PW_SCATTER : 2d -> 1d "
8983 grays = z_zero
8984 CALL pw_scatter_p_c1d (pw1, grays)
8985 nloc = pw2%pw_grid%npts_local
8986 ALLOCATE (c_in(nloc(1), nloc(2), nloc(3)))
8987 !..transform
8988 IF (pw1%pw_grid%para%ray_distribution) THEN
8989 CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
8990 pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
8991 pw1%pw_grid%para%bo, debug=test)
8992 ELSE
8993 CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
8994 pw1%pw_grid%para%bo, debug=test)
8995 END IF
8996 !..prepare output
8997 IF (test .AND. out_unit > 0) &
8998 WRITE (out_unit, '(A)') " Real part "
8999 CALL pw_copy_from_array(pw2, c_in)
9000 DEALLOCATE (c_in)
9001#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
9002 END IF
9003#endif
9004 END IF
9005
9006 IF (test .AND. out_unit > 0) THEN
9007 WRITE (out_unit, '(A)') " End of FFT Protocol "
9008 END IF
9009
9010 CALL timestop(handle)
9011 CALL timestop(handle2)
9012
9013 END SUBROUTINE fft_wrap_pw1pw2_c1d_r3d_gs_rs
9014
9015
9016
9017! **************************************************************************************************
9018!> \brief Gathers the pw vector from a 3d data field
9019!> \param pw ...
9020!> \param c ...
9021!> \param scale ...
9022!> \par History
9023!> none
9024!> \author JGH
9025! **************************************************************************************************
9026 SUBROUTINE pw_gather_s_c1d_r3d_2(pw1, pw2, scale)
9027
9028 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
9029 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
9030 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
9031
9032 CALL pw_gather_s_c1d_r3d (pw2, pw1%array, scale)
9033
9034 END SUBROUTINE pw_gather_s_c1d_r3d_2
9035
9036! **************************************************************************************************
9037!> \brief Gathers the pw vector from a 3d data field
9038!> \param pw ...
9039!> \param c ...
9040!> \param scale ...
9041!> \par History
9042!> none
9043!> \author JGH
9044! **************************************************************************************************
9045 SUBROUTINE pw_gather_s_c1d_r3d (pw, c, scale)
9046
9047 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
9048 REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
9049 REAL(kind=dp), INTENT(IN), OPTIONAL :: scale
9050
9051 CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'
9052
9053 INTEGER :: gpt, handle, l, m, n
9054
9055 CALL timeset(routinen, handle)
9056
9057 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
9058 ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)
9059
9060 IF (PRESENT(scale)) THEN
9061!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
9062 DO gpt = 1, ngpts
9063 l = mapl(ghat(1, gpt)) + 1
9064 m = mapm(ghat(2, gpt)) + 1
9065 n = mapn(ghat(3, gpt)) + 1
9066 pw%array(gpt) = scale* cmplx(c(l, m, n), 0.0_dp, kind=dp)
9067 END DO
9068!$OMP END PARALLEL DO
9069 ELSE
9070!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
9071 DO gpt = 1, ngpts
9072 l = mapl(ghat(1, gpt)) + 1
9073 m = mapm(ghat(2, gpt)) + 1
9074 n = mapn(ghat(3, gpt)) + 1
9075 pw%array(gpt) = cmplx(c(l, m, n), 0.0_dp, kind=dp)
9076 END DO
9077!$OMP END PARALLEL DO
9078 END IF
9079
9080 END associate
9081
9082 CALL timestop(handle)
9083
9084 END SUBROUTINE pw_gather_s_c1d_r3d
9085
9086! **************************************************************************************************
9087!> \brief Scatters a pw vector to a 3d data field
9088!> \param pw ...
9089!> \param c ...
9090!> \param scale ...
9091!> \par History
9092!> none
9093!> \author JGH
9094! **************************************************************************************************
9095 SUBROUTINE pw_scatter_s_c1d_r3d_2(pw1, pw2, scale)
9096
9097 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
9098 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
9099 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
9100
9101 CALL pw_scatter_s_c1d_r3d (pw1, pw2%array, scale)
9102
9103 END SUBROUTINE pw_scatter_s_c1d_r3d_2
9104
9105! **************************************************************************************************
9106!> \brief Scatters a pw vector to a 3d data field
9107!> \param pw ...
9108!> \param c ...
9109!> \param scale ...
9110!> \par History
9111!> none
9112!> \author JGH
9113! **************************************************************************************************
9114 SUBROUTINE pw_scatter_s_c1d_r3d (pw, c, scale)
9115
9116 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
9117 REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
9118 REAL(kind=dp), INTENT(IN), OPTIONAL :: scale
9119
9120 CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'
9121
9122 INTEGER :: gpt, handle, l, m, n
9123
9124 CALL timeset(routinen, handle)
9125
9126 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
9127 ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))
9128
9129 ! should only zero the unused bits (but the zero is needed)
9130 IF (.NOT. PRESENT(scale)) c = 0.0_dp
9131
9132 IF (PRESENT(scale)) THEN
9133!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
9134 DO gpt = 1, ngpts
9135 l = mapl(ghat(1, gpt)) + 1
9136 m = mapm(ghat(2, gpt)) + 1
9137 n = mapn(ghat(3, gpt)) + 1
9138 c(l, m, n) = scale* real(pw%array(gpt), kind=dp)
9139 END DO
9140!$OMP END PARALLEL DO
9141 ELSE
9142!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
9143 DO gpt = 1, ngpts
9144 l = mapl(ghat(1, gpt)) + 1
9145 m = mapm(ghat(2, gpt)) + 1
9146 n = mapn(ghat(3, gpt)) + 1
9147 c(l, m, n) = real(pw%array(gpt), kind=dp)
9148 END DO
9149!$OMP END PARALLEL DO
9150 END IF
9151
9152 END associate
9153
9154 IF (pw%pw_grid%grid_span == halfspace) THEN
9155
9156 associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
9157 ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))
9158
9159 IF (PRESENT(scale)) THEN
9160!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
9161 DO gpt = 1, ngpts
9162 l = mapl(ghat(1, gpt)) + 1
9163 m = mapm(ghat(2, gpt)) + 1
9164 n = mapn(ghat(3, gpt)) + 1
9165 c(l, m, n) = scale*( real(pw%array(gpt), kind=dp))
9166 END DO
9167!$OMP END PARALLEL DO
9168 ELSE
9169!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
9170 DO gpt = 1, ngpts
9171 l = mapl(ghat(1, gpt)) + 1
9172 m = mapm(ghat(2, gpt)) + 1
9173 n = mapn(ghat(3, gpt)) + 1
9174 c(l, m, n) = ( real(pw%array(gpt), kind=dp))
9175 END DO
9176!$OMP END PARALLEL DO
9177 END IF
9178
9179 END associate
9180
9181 END IF
9182
9183 CALL timestop(handle)
9184
9185 END SUBROUTINE pw_scatter_s_c1d_r3d
9186
9187
9188
9189
9190
9191
9192
9193
9194! **************************************************************************************************
9195!> \brief Generic function for 3d FFT of a coefficient_type or pw_r3d_rs_type
9196!> \param pw1 ...
9197!> \param pw2 ...
9198!> \param debug ...
9199!> \par History
9200!> JGH (30-12-2000): New setup of functions and adaptation to parallelism
9201!> JGH (04-01-2001): Moved routine from pws to this module, only covers
9202!> pw_types, no more coefficient types
9203!> \author apsi
9204!> \note
9205!> fft_wrap_pw1pw2
9206! **************************************************************************************************
9207 SUBROUTINE fft_wrap_pw1pw2_c1d_c3d_gs_rs (pw1, pw2, debug)
9208
9209 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
9210 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
9211 LOGICAL, INTENT(IN), OPTIONAL :: debug
9212
9213 CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'
9214
9215 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
9216 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
9217 INTEGER :: handle, handle2, my_pos, nrays, &
9218 out_unit
9219 INTEGER, DIMENSION(:), POINTER :: n
9220 LOGICAL :: test
9221
9222 CALL timeset(routinen, handle2)
9223 out_unit = cp_logger_get_default_io_unit()
9224 CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
9225 ceiling(pw1%pw_grid%cutoff/10)*10))), handle)
9226
9227 NULLIFY (c_in)
9228 NULLIFY (c_out)
9229
9230 IF (PRESENT(debug)) THEN
9231 test = debug
9232 ELSE
9233 test = .false.
9234 END IF
9235
9236 !..check if grids are compatible
9237 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
9238 IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
9239 cpabort("PW grids not compatible")
9240 END IF
9241 IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
9242 cpabort("PW grids have not compatible MPI groups")
9243 END IF
9244 END IF
9245
9246 n => pw1%pw_grid%npts
9247
9248 IF (pw1%pw_grid%para%mode == pw_mode_local) THEN
9249
9250 !
9251 !..replicated data, use local FFT
9252 !
9253
9254 IF (test .AND. out_unit > 0) THEN
9255 WRITE (out_unit, '(A)') " FFT Protocol "
9256 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
9257 WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
9258 WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
9259 END IF
9260
9261 c_out => pw2%array
9262 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
9263 CALL pw_scatter_s_c1d_c3d(pw1, c_out)
9264 CALL fft3d(bwfft, n, c_out, debug=test)
9265
9266 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "
9267
9268 ELSE
9269
9270 !
9271 !..parallel FFT
9272 !
9273
9274 IF (test .AND. out_unit > 0) THEN
9275 WRITE (out_unit, '(A)') " FFT Protocol "
9276 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
9277 WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
9278 WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
9279 END IF
9280
9281 my_pos = pw1%pw_grid%para%group%mepos
9282 nrays = pw1%pw_grid%para%nyzray(my_pos)
9283 grays => pw1%pw_grid%grays
9284
9285 !..prepare input
9286 IF (test .AND. out_unit > 0) &
9287 WRITE (out_unit, '(A)') " PW_SCATTER : 2d -> 1d "
9288 grays = z_zero
9289 CALL pw_scatter_p_c1d (pw1, grays)
9290 c_in => pw2%array
9291 !..transform
9292 IF (pw1%pw_grid%para%ray_distribution) THEN
9293 CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
9294 pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
9295 pw1%pw_grid%para%bo, debug=test)
9296 ELSE
9297 CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
9298 pw1%pw_grid%para%bo, debug=test)
9299 END IF
9300 !..prepare output (nothing to do)
9301 END IF
9302
9303 IF (test .AND. out_unit > 0) THEN
9304 WRITE (out_unit, '(A)') " End of FFT Protocol "
9305 END IF
9306
9307 CALL timestop(handle)
9308 CALL timestop(handle2)
9309
9310 END SUBROUTINE fft_wrap_pw1pw2_c1d_c3d_gs_rs
9311
9312
9313
9314! **************************************************************************************************
9315!> \brief Gathers the pw vector from a 3d data field
9316!> \param pw ...
9317!> \param c ...
9318!> \param scale ...
9319!> \par History
9320!> none
9321!> \author JGH
9322! **************************************************************************************************
9323 SUBROUTINE pw_gather_s_c1d_c3d_2(pw1, pw2, scale)
9324
9325 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
9326 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
9327 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
9328
9329 CALL pw_gather_s_c1d_c3d (pw2, pw1%array, scale)
9330
9331 END SUBROUTINE pw_gather_s_c1d_c3d_2
9332
9333! **************************************************************************************************
9334!> \brief Gathers the pw vector from a 3d data field
9335!> \param pw ...
9336!> \param c ...
9337!> \param scale ...
9338!> \par History
9339!> none
9340!> \author JGH
9341! **************************************************************************************************
9342 SUBROUTINE pw_gather_s_c1d_c3d (pw, c, scale)
9343
9344 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
9345 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
9346 REAL(kind=dp), INTENT(IN), OPTIONAL :: scale
9347
9348 CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'
9349
9350 INTEGER :: gpt, handle, l, m, n
9351
9352 CALL timeset(routinen, handle)
9353
9354 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
9355 ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)
9356
9357 IF (PRESENT(scale)) THEN
9358!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
9359 DO gpt = 1, ngpts
9360 l = mapl(ghat(1, gpt)) + 1
9361 m = mapm(ghat(2, gpt)) + 1
9362 n = mapn(ghat(3, gpt)) + 1
9363 pw%array(gpt) = scale* c(l, m, n)
9364 END DO
9365!$OMP END PARALLEL DO
9366 ELSE
9367!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
9368 DO gpt = 1, ngpts
9369 l = mapl(ghat(1, gpt)) + 1
9370 m = mapm(ghat(2, gpt)) + 1
9371 n = mapn(ghat(3, gpt)) + 1
9372 pw%array(gpt) = c(l, m, n)
9373 END DO
9374!$OMP END PARALLEL DO
9375 END IF
9376
9377 END associate
9378
9379 CALL timestop(handle)
9380
9381 END SUBROUTINE pw_gather_s_c1d_c3d
9382
9383! **************************************************************************************************
9384!> \brief Scatters a pw vector to a 3d data field
9385!> \param pw ...
9386!> \param c ...
9387!> \param scale ...
9388!> \par History
9389!> none
9390!> \author JGH
9391! **************************************************************************************************
9392 SUBROUTINE pw_scatter_s_c1d_c3d_2(pw1, pw2, scale)
9393
9394 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
9395 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
9396 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
9397
9398 CALL pw_scatter_s_c1d_c3d (pw1, pw2%array, scale)
9399
9400 END SUBROUTINE pw_scatter_s_c1d_c3d_2
9401
9402! **************************************************************************************************
9403!> \brief Scatters a pw vector to a 3d data field
9404!> \param pw ...
9405!> \param c ...
9406!> \param scale ...
9407!> \par History
9408!> none
9409!> \author JGH
9410! **************************************************************************************************
9411 SUBROUTINE pw_scatter_s_c1d_c3d (pw, c, scale)
9412
9413 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
9414 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
9415 REAL(kind=dp), INTENT(IN), OPTIONAL :: scale
9416
9417 CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'
9418
9419 INTEGER :: gpt, handle, l, m, n
9420
9421 CALL timeset(routinen, handle)
9422
9423 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
9424 ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))
9425
9426 ! should only zero the unused bits (but the zero is needed)
9427 IF (.NOT. PRESENT(scale)) c = 0.0_dp
9428
9429 IF (PRESENT(scale)) THEN
9430!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
9431 DO gpt = 1, ngpts
9432 l = mapl(ghat(1, gpt)) + 1
9433 m = mapm(ghat(2, gpt)) + 1
9434 n = mapn(ghat(3, gpt)) + 1
9435 c(l, m, n) = scale* pw%array(gpt)
9436 END DO
9437!$OMP END PARALLEL DO
9438 ELSE
9439!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
9440 DO gpt = 1, ngpts
9441 l = mapl(ghat(1, gpt)) + 1
9442 m = mapm(ghat(2, gpt)) + 1
9443 n = mapn(ghat(3, gpt)) + 1
9444 c(l, m, n) = pw%array(gpt)
9445 END DO
9446!$OMP END PARALLEL DO
9447 END IF
9448
9449 END associate
9450
9451 IF (pw%pw_grid%grid_span == halfspace) THEN
9452
9453 associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
9454 ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))
9455
9456 IF (PRESENT(scale)) THEN
9457!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
9458 DO gpt = 1, ngpts
9459 l = mapl(ghat(1, gpt)) + 1
9460 m = mapm(ghat(2, gpt)) + 1
9461 n = mapn(ghat(3, gpt)) + 1
9462 c(l, m, n) = scale*conjg( pw%array(gpt))
9463 END DO
9464!$OMP END PARALLEL DO
9465 ELSE
9466!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
9467 DO gpt = 1, ngpts
9468 l = mapl(ghat(1, gpt)) + 1
9469 m = mapm(ghat(2, gpt)) + 1
9470 n = mapn(ghat(3, gpt)) + 1
9471 c(l, m, n) = conjg( pw%array(gpt))
9472 END DO
9473!$OMP END PARALLEL DO
9474 END IF
9475
9476 END associate
9477
9478 END IF
9479
9480 CALL timestop(handle)
9481
9482 END SUBROUTINE pw_scatter_s_c1d_c3d
9483
9484
9485
9486
9487
9488
9489
9490
9491! **************************************************************************************************
9492!> \brief Generic function for 3d FFT of a coefficient_type or pw_r3d_rs_type
9493!> \param pw1 ...
9494!> \param pw2 ...
9495!> \param debug ...
9496!> \par History
9497!> JGH (30-12-2000): New setup of functions and adaptation to parallelism
9498!> JGH (04-01-2001): Moved routine from pws to this module, only covers
9499!> pw_types, no more coefficient types
9500!> \author apsi
9501!> \note
9502!> fft_wrap_pw1pw2
9503! **************************************************************************************************
9504 SUBROUTINE fft_wrap_pw1pw2_c3d_r3d_gs_rs (pw1, pw2, debug)
9505
9506 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
9507 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
9508 LOGICAL, INTENT(IN), OPTIONAL :: debug
9509
9510 CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'
9511
9512 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
9513 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
9514 INTEGER :: handle, handle2, my_pos, nrays, &
9515 out_unit
9516 INTEGER, DIMENSION(:), POINTER :: n
9517 LOGICAL :: test
9518
9519 CALL timeset(routinen, handle2)
9520 out_unit = cp_logger_get_default_io_unit()
9521 CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
9522 ceiling(pw1%pw_grid%cutoff/10)*10))), handle)
9523
9524 NULLIFY (c_in)
9525 NULLIFY (c_out)
9526
9527 IF (PRESENT(debug)) THEN
9528 test = debug
9529 ELSE
9530 test = .false.
9531 END IF
9532
9533 !..check if grids are compatible
9534 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
9535 IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
9536 cpabort("PW grids not compatible")
9537 END IF
9538 IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
9539 cpabort("PW grids have not compatible MPI groups")
9540 END IF
9541 END IF
9542
9543 n => pw1%pw_grid%npts
9544
9545 IF (pw1%pw_grid%para%mode == pw_mode_local) THEN
9546
9547 !
9548 !..replicated data, use local FFT
9549 !
9550
9551 IF (test .AND. out_unit > 0) THEN
9552 WRITE (out_unit, '(A)') " FFT Protocol "
9553 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
9554 WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
9555 WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
9556 END IF
9557
9558 c_in => pw1%array
9559 ALLOCATE (c_out(n(1), n(2), n(3)))
9560 CALL fft3d(bwfft, n, c_in, c_out, debug=test)
9561 ! use real part only
9562 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
9563 pw2%array = real(c_out, kind=dp)
9564 DEALLOCATE (c_out)
9565
9566 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "
9567
9568 ELSE
9569
9570 !
9571 !..parallel FFT
9572 !
9573
9574 IF (test .AND. out_unit > 0) THEN
9575 WRITE (out_unit, '(A)') " FFT Protocol "
9576 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
9577 WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
9578 WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
9579 END IF
9580
9581 my_pos = pw1%pw_grid%para%group%mepos
9582 nrays = pw1%pw_grid%para%nyzray(my_pos)
9583 grays => pw1%pw_grid%grays
9584
9585 END IF
9586
9587 IF (test .AND. out_unit > 0) THEN
9588 WRITE (out_unit, '(A)') " End of FFT Protocol "
9589 END IF
9590
9591 CALL timestop(handle)
9592 CALL timestop(handle2)
9593
9594 END SUBROUTINE fft_wrap_pw1pw2_c3d_r3d_gs_rs
9595
9596
9597
9598
! **************************************************************************************************
!> \brief 3d forward FFT from a complex real-space grid (pw1) to the c1d
!>        g-vector representation (pw2); the 3d (serial) or 2d-ray (parallel)
!>        result is gathered into the 1d coefficient array.
!> \param pw1 input, c3d values in r-space
!> \param pw2 output, c1d coefficients in g-space
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c1d_rs_gs(pw1, pw2, debug)

      TYPE(pw_c3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw2
      LOGICAL, INTENT(IN), OPTIONAL                      :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER                                            :: handle, handle2, my_pos, nrays, &
                                                            out_unit
      INTEGER, DIMENSION(:), POINTER                     :: n
      LOGICAL                                            :: test

      CALL timeset(routineN, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routineN//"_"//TRIM(ADJUSTL(cp_to_string( &
                                               CEILING(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .FALSE.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            ! a forward FFT ends in reciprocal space (the label used to
            ! wrongly repeat REALSPACE here)
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         c_in => pw1%array
         ALLOCATE (c_out(n(1), n(2), n(3)))
         ! transform
         CALL fft3d(fwfft, n, c_in, c_out, debug=test)
         ! gather results
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_GATHER : 3d -> 1d "
         CALL pw_gather_s_c1d_c3d(pw2, c_out)
         DEALLOCATE (c_out)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

         !..prepare input
         c_in => pw1%array
         grays = z_zero
         !..transform
         IF (pw1%pw_grid%para%ray_distribution) THEN
            CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                       pw1%pw_grid%para%bo, debug=test)
         ELSE
            CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%bo, debug=test)
         END IF
         !..prepare output
         IF (test .AND. out_unit > 0) &
            WRITE (out_unit, '(A)') " PW_GATHER : 2d -> 1d "
         CALL pw_gather_p_c1d(pw2, grays)
      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c1d_rs_gs
9720
9721
9722
9723
9724
! **************************************************************************************************
!> \brief 3d forward FFT between two complex 3d grids, real space (pw1) to
!>        reciprocal space (pw2), transforming in place into pw2%array.
!> \param pw1 input, c3d values in r-space
!> \param pw2 output, c3d coefficients in g-space
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2; the distributed-data branch only sets up the ray
!>      information and performs no transform here
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_rs_gs(pw1, pw2, debug)

      TYPE(pw_c3d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT)                :: pw2
      LOGICAL, INTENT(IN), OPTIONAL                      :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER                                            :: handle, handle2, my_pos, nrays, &
                                                            out_unit
      INTEGER, DIMENSION(:), POINTER                     :: n
      LOGICAL                                            :: test

      CALL timeset(routineN, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routineN//"_"//TRIM(ADJUSTL(cp_to_string( &
                                               CEILING(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .FALSE.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            ! a forward FFT ends in reciprocal space (the label used to
            ! wrongly repeat REALSPACE here)
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         c_in => pw1%array
         c_out => pw2%array
         CALL fft3d(fwfft, n, c_in, c_out, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT: only the ray bookkeeping is prepared here,
         !  no transform is performed in this branch
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_rs_gs
9825
! **************************************************************************************************
!> \brief 3d backward FFT between two complex 3d grids, reciprocal space (pw1)
!>        to real space (pw2), transforming in place into pw2%array.
!> \param pw1 input, c3d coefficients in g-space
!> \param pw2 output, c3d values in r-space
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2; the distributed-data branch only sets up the ray
!>      information and performs no transform here
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_gs_rs(pw1, pw2, debug)

      TYPE(pw_c3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT)                :: pw2
      LOGICAL, INTENT(IN), OPTIONAL                      :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER                                            :: handle, handle2, my_pos, nrays, &
                                                            out_unit
      INTEGER, DIMENSION(:), POINTER                     :: n
      LOGICAL                                            :: test

      CALL timeset(routineN, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routineN//"_"//TRIM(ADJUSTL(cp_to_string( &
                                               CEILING(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .FALSE.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            ! a backward FFT ends in real space (the label used to wrongly
            ! repeat RECIPROCALSPACE here)
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         c_in => pw1%array
         c_out => pw2%array
         CALL fft3d(bwfft, n, c_in, c_out, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT: only the ray bookkeeping is prepared here,
         !  no transform is performed in this branch
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_gs_rs
9926
9927
9928
! **************************************************************************************************
!> \brief Scale every g-space coefficient by the Gaussian damping factor
!>        exp(-g^2/(4*omega^2)), as needed for the longrange Coulomb potential
!>        V(\vec r)=erf(omega*r)/r
!>        V(\vec g)=\frac{4*\pi}{g**2}*exp(-g**2/omega**2)
!> \param pw g-space function, damped in place
!> \param omega range-separation parameter, must be non-negative
!> \par History
!>      Frederick Stein (12-04-2019) created
!> \author Frederick Stein (12-Apr-2019)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_gauss_damp(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gauss_damp'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: damp, prefac

      CALL timeset(routineN, handle)
      cpassert(omega >= 0)

      ! exponent prefactor 1/(4*omega^2) of the damping Gaussian
      prefac = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, damp) SHARED(pw, prefac)
      DO ig = 1, SIZE(pw%array)
         damp = EXP(-pw%pw_grid%gsq(ig)*prefac)
         pw%array(ig) = damp*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_gauss_damp
9968
! **************************************************************************************************
!> \brief Scale every g-space coefficient with the logarithmic derivative
!>        factor 1 + g^2/(4*omega^2) of the Gaussian damping
!> \param pw g-space function; only the data array is modified
!> \param omega range-separation parameter, must be non-negative
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_gauss(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_gauss'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: fac, prefac

      CALL timeset(routineN, handle)
      cpassert(omega >= 0)

      ! exponent prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, fac) SHARED(pw, prefac)
      DO ig = 1, SIZE(pw%array)
         fac = 1.0_dp + prefac*pw%pw_grid%gsq(ig)
         pw%array(ig) = fac*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_log_deriv_gauss
10001
! **************************************************************************************************
!> \brief Multiply all data points with the complementary Gaussian damping
!>        factor 1 - exp(-g^2/(4*omega^2)), the shortrange counterpart of
!>        pw_gauss_damp
!> \param pw g-space function, damped in place
!> \param omega range-separation parameter
!> \par History
!>      Frederick Stein (12-04-2019) created
!> \author Frederick Stein (12-Apr-2019)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_compl_gauss_damp(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_compl_gauss_damp'

      INTEGER                                            :: cnt, handle, i
      REAL(KIND=dp)                                      :: omega_2, tmp, tmp2

      CALL timeset(routineN, handle)

      omega_2 = omega*omega
      omega_2 = 0.25_dp/omega_2

      cnt = SIZE(pw%array)

!$OMP PARALLEL DO PRIVATE(i, tmp, tmp2) DEFAULT(NONE) SHARED(cnt, pw, omega_2)
      DO i = 1, cnt
         tmp = -omega_2*pw%pw_grid%gsq(i)
         IF (ABS(tmp) > 1.0e-5_dp) THEN
            tmp2 = 1.0_dp - EXP(tmp)
         ELSE
            ! Taylor expansion of 1-exp(tmp) = -(tmp + tmp^2/2 + tmp^3/6 + ...)
            ! to avoid cancellation; note the overall minus sign (the previous
            ! expression evaluated exp(tmp)-1, i.e. had the wrong sign)
            tmp2 = -tmp*(1.0_dp + 0.5_dp*tmp*(1.0_dp + tmp/3.0_dp))
         END IF
         pw%array(i) = pw%array(i)*tmp2
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_compl_gauss_damp
10047
! **************************************************************************************************
!> \brief Multiply all data points with the logarithmic derivative of the
!>        complementary Gaussian damping factor
!> \param pw g-space function; only the data array is modified
!> \param omega range-separation parameter
!> \note
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_compl_gauss(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_compl_gauss'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: expo, fac, prefac

      CALL timeset(routineN, handle)

      ! exponent prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, expo, fac) &
!$OMP SHARED(pw, prefac)
      DO ig = 1, SIZE(pw%array)
         expo = prefac*pw%pw_grid%gsq(ig)
         ! For too small arguments, use the Taylor polynomial to prevent division by zero
         IF (ABS(expo) >= 0.003_dp) THEN
            fac = 1.0_dp - expo*EXP(-expo)/(1.0_dp - EXP(-expo))
         ELSE
            fac = 0.5_dp*expo - expo**2/12.0_dp
         END IF
         pw%array(ig) = fac*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_compl_gauss
10086
! **************************************************************************************************
!> \brief Scale every g-space coefficient with a mixed Coulomb/longrange factor
!>        scale_coul + scale_long*exp(-g^2/(4*omega^2)), as needed for
!>        V(\vec r)=(a+b*erf(omega*r))/r
!>        V(\vec g)=\frac{4*\pi}{g**2}*(a+b*exp(-g**2/omega**2))
!> \param pw g-space function, scaled in place
!> \param omega range-separation parameter
!> \param scale_coul weight of the full Coulomb part
!> \param scale_long weight of the longrange (damped) part
!> \par History
!>      Frederick Stein (16-Dec-2021) created
!> \author Frederick Stein (16-Dec-2021)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_gauss_damp_mix(pw, omega, scale_coul, scale_long)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega, scale_coul, scale_long

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gauss_damp_mix'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: fac, prefac

      CALL timeset(routineN, handle)

      ! exponent prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) SHARED(pw, prefac, scale_coul, scale_long) PRIVATE(ig, fac)
      DO ig = 1, SIZE(pw%array)
         fac = scale_coul + scale_long*EXP(-pw%pw_grid%gsq(ig)*prefac)
         pw%array(ig) = fac*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_gauss_damp_mix
10127
! **************************************************************************************************
!> \brief Multiply all data points with the logarithmic derivative of the
!>        mixed longrange/Coulomb potential
!> \param pw g-space function; only the data array is modified
!> \param omega range-separation parameter
!> \param scale_coul weight of the full Coulomb part
!> \param scale_long weight of the longrange (damped) part
!> \note
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_mix_cl(pw, omega, scale_coul, scale_long)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega, scale_coul, scale_long

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_mix_cl'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: expo, fac, prefac

      CALL timeset(routineN, handle)

      ! exponent prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, expo, fac) &
!$OMP SHARED(pw, prefac, scale_long, scale_coul)
      DO ig = 1, SIZE(pw%array)
         expo = prefac*pw%pw_grid%gsq(ig)
         fac = 1.0_dp + scale_long*expo*EXP(-expo)/(scale_coul + scale_long*EXP(-expo))
         pw%array(ig) = fac*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_mix_cl
10164
! **************************************************************************************************
!> \brief Multiply all data points with a complementary cosine,
!>        needed for the truncated Coulomb potential
!>        V(\vec r)=1/r if r<rc, 0 else
!>        V(\vec g)=\frac{4*\pi}{g**2}*(1-cos(g*rc))
!> \param pw g-space function, scaled in place
!> \param rcutoff truncation radius rc, must be non-negative
!> \par History
!>      Frederick Stein (07-06-2021) created
!> \author Frederick Stein (07-Jun-2021)
!> \note
!>      Multiplies by complementary cosine
! **************************************************************************************************
   SUBROUTINE pw_truncated(pw, rcutoff)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: rcutoff

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_truncated'

      INTEGER                                            :: handle, i
      REAL(KIND=dp)                                      :: tmp, tmp2

      CALL timeset(routineN, handle)
      cpassert(rcutoff >= 0)

!$OMP PARALLEL DO PRIVATE(i, tmp, tmp2) DEFAULT(NONE) SHARED(pw, rcutoff)
      DO i = 1, SIZE(pw%array)
         tmp = SQRT(pw%pw_grid%gsq(i))*rcutoff
         IF (tmp >= 0.005_dp) THEN
            tmp2 = 1.0_dp - COS(tmp)
         ELSE
            ! Taylor expansion of 1-cos(x) = x^2/2*(1-x^2/12) for small x;
            ! the literal carries the dp suffix (was a default-real 1.0)
            tmp2 = tmp**2/2.0_dp*(1.0_dp - tmp**2/12.0_dp)
         END IF
         pw%array(i) = pw%array(i)*tmp2
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_truncated
10206
! **************************************************************************************************
!> \brief Multiply all data points with the logarithmic derivative of the
!>        complementary cosine; needed for virials with truncated Coulomb
!>        potentials
!> \param pw g-space function; only the data array is modified
!> \param rcutoff truncation radius, must be non-negative
!> \note
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_trunc(pw, rcutoff)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: rcutoff

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_trunc'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: fac, grc, half_rc

      CALL timeset(routineN, handle)
      cpassert(rcutoff >= 0)

      half_rc = 0.5_dp*rcutoff
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, grc, fac) &
!$OMP SHARED(pw, half_rc)
      DO ig = 1, SIZE(pw%array)
         grc = half_rc*SQRT(pw%pw_grid%gsq(ig))
         ! For too small arguments, use the Taylor polynomial to prevent division by zero
         IF (ABS(grc) >= 0.0001_dp) THEN
            fac = 1.0_dp - grc/TAN(grc)
         ELSE
            fac = grc**2*(1.0_dp + grc**2/15.0_dp)/3.0_dp
         END IF
         pw%array(ig) = fac*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_trunc
10245
! **************************************************************************************************
!> \brief Calculate the derivative of a plane wave vector:
!>        dx^n(1) dy^n(2) dz^n(3) PW, performed in place in g-space
!> \param pw g-space function, replaced by its derivative
!> \param n per-direction derivative orders, all must be >= 0
!> \par History
!>      JGH (06-10-2002) allow only for inplace derivatives
!> \author JGH (25-Feb-2001)
!> \note
!>      Calculate the derivative dx^n(1) dy^n(2) dz^n(3) PW
! **************************************************************************************************
   SUBROUTINE pw_derive(pw, n)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      INTEGER, DIMENSION(3), INTENT(IN)                  :: n

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_derive'

      COMPLEX(KIND=dp)                                   :: im
      INTEGER                                            :: handle, idir, idx, m

      CALL timeset(routineN, handle)

      ! the guard rejects negative orders; the previous message claimed the
      ! opposite ("Nonnegative exponents are not supported!")
      IF (ANY(n < 0)) &
         cpabort("Negative exponents are not supported!")

      ! every d/dx_k contributes one factor of i; collect them all at once
      m = SUM(n)
      im = gaussi**m

      DO idir = 1, 3
         IF (n(idir) == 1) THEN
!$OMP PARALLEL DO DEFAULT(NONE) SHARED(pw, idir) PRIVATE(idx)
            DO idx = 1, SIZE(pw%array)
               pw%array(idx) = pw%array(idx)*pw%pw_grid%g(idir, idx)
            END DO
!$OMP END PARALLEL DO
         ELSE IF (n(idir) > 1) THEN
!$OMP PARALLEL DO DEFAULT(NONE) SHARED(n, pw, idir) PRIVATE(idx)
            DO idx = 1, SIZE(pw%array)
               pw%array(idx) = pw%array(idx)*pw%pw_grid%g(idir, idx)**n(idir)
            END DO
!$OMP END PARALLEL DO
         END IF
      END DO

      ! im can take the values 1, -1, i, -i
      ! skip this if im == 1
      IF (ABS(REAL(im, KIND=dp) - 1.0_dp) > 1.0e-10_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(im, pw)
         pw%array(:) = im*pw%array(:)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_derive
10301
! **************************************************************************************************
!> \brief Calculate the Laplacian of a plane wave vector in place:
!>        each coefficient is multiplied by -g^2
!> \param pw g-space function, replaced by DELTA PW
!> \par History
!>      Frederick Stein (01-02-2022) created
!> \author JGH (25-Feb-2001)
!> \note
!>      Calculate the derivative DELTA PW
! **************************************************************************************************
   SUBROUTINE pw_laplace(pw)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_laplace'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = -pw%array(ig)*pw%pw_grid%gsq(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_laplace
10328
! **************************************************************************************************
!> \brief Calculate the tensorial 2nd derivative of a plane wave vector;
!>        for i == j the traceless component g_i^2 - g^2/3 is used
!> \param pw input g-space function
!> \param pwdr2 output, component (i,j) of the second derivative tensor
!> \param i first Cartesian index (1..3)
!> \param j second Cartesian index (1..3)
!> \par History
!>      none
!> \author JGH (05-May-2006)
!> \note
! **************************************************************************************************
   SUBROUTINE pw_dr2(pw, pwdr2, i, j)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw, pwdr2
      INTEGER, INTENT(IN)                                :: i, j

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_dr2'

      INTEGER                                            :: handle, ig, npts
      REAL(KIND=dp)                                      :: gtens, third

      CALL timeset(routineN, handle)

      third = 1.0_dp/3.0_dp

      npts = SIZE(pw%array)

      IF (i == j) THEN
         ! diagonal element: remove one third of the trace
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, gtens) SHARED(npts, i, third, pw, pwdr2)
         DO ig = 1, npts
            gtens = pw%pw_grid%g(i, ig)**2 - third*pw%pw_grid%gsq(ig)
            pwdr2%array(ig) = gtens*pw%array(ig)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO PRIVATE(ig) DEFAULT(NONE) SHARED(npts, i, j, pw, pwdr2)
         DO ig = 1, npts
            pwdr2%array(ig) = pw%array(ig)*(pw%pw_grid%g(i, ig)*pw%pw_grid%g(j, ig))
         END DO
!$OMP END PARALLEL DO
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_dr2
10374
! **************************************************************************************************
!> \brief Calculate the tensorial 2nd derivative of a plane wave vector
!>        and divide by |G|^2; the loop starts at the first nonzero G,
!>        and pwdr2_gg(G=0) is explicitly set to zero.
!> \param pw input g-space function
!> \param pwdr2_gg output, component (i,j) of the tensor divided by |G|^2
!> \param i first Cartesian index (1..3)
!> \param j second Cartesian index (1..3)
!> \par History
!>      none
!> \author RD (20-Nov-2006)
!> \note
!>      Adapted from pw_dr2
! **************************************************************************************************
   SUBROUTINE pw_dr2_gg(pw, pwdr2_gg, i, j)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw, pwdr2_gg
      INTEGER, INTENT(IN)                                :: i, j

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_dr2_gg'

      INTEGER                                            :: handle, ig, npts
      REAL(KIND=dp)                                      :: gtens, third

      CALL timeset(routineN, handle)

      third = 1.0_dp/3.0_dp

      npts = SIZE(pw%array)

      IF (i == j) THEN
         ! diagonal element: traceless part divided by g^2
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, gtens) SHARED(npts, i, third, pw, pwdr2_gg)
         DO ig = pw%pw_grid%first_gne0, npts
            gtens = pw%pw_grid%g(i, ig)**2 - third*pw%pw_grid%gsq(ig)
            pwdr2_gg%array(ig) = gtens/pw%pw_grid%gsq(ig)*pw%array(ig)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO PRIVATE(ig) DEFAULT(NONE) SHARED(npts, i, j, pw, pwdr2_gg)
         DO ig = pw%pw_grid%first_gne0, npts
            pwdr2_gg%array(ig) = pw%array(ig)*((pw%pw_grid%g(i, ig)*pw%pw_grid%g(j, ig)) &
                                               /pw%pw_grid%gsq(ig))
         END DO
!$OMP END PARALLEL DO
      END IF

      ! the G=0 component is undefined for the division; force it to zero
      IF (pw%pw_grid%have_g0) pwdr2_gg%array(1) = 0.0_dp

      CALL timestop(handle)

   END SUBROUTINE pw_dr2_gg
10424
! **************************************************************************************************
!> \brief Multiplies a G-space function with a smoothing factor of the form
!>        f(|G|) = exp((ecut - G^2)/sigma)/(1+exp((ecut - G^2)/sigma))
!> \param pw g-space function, smoothed in place
!> \param ecut smoothing onset (center of the logistic step)
!> \param sigma smoothing width
!> \par History
!>      none
!> \author JGH (09-June-2006)
!> \note
! **************************************************************************************************
   SUBROUTINE pw_smoothing(pw, ecut, sigma)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: ecut, sigma

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_smoothing'

      INTEGER                                            :: cnt, handle, ig
      REAL(KIND=dp)                                      :: arg, f

      CALL timeset(routineN, handle)

      cnt = SIZE(pw%array)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, arg, f) SHARED(cnt, ecut, pw, sigma)
      DO ig = 1, cnt
         arg = (ecut - pw%pw_grid%gsq(ig))/sigma
         ! evaluate the logistic function in the overflow-safe branch:
         ! the naive exp(arg)/(1+exp(arg)) yields Inf/Inf = NaN for large
         ! positive arg
         IF (arg >= 0.0_dp) THEN
            f = 1.0_dp/(1.0_dp + EXP(-arg))
         ELSE
            f = EXP(arg)/(1.0_dp + EXP(arg))
         END IF
         pw%array(ig) = f*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_smoothing
10461
! **************************************************************************************************
!> \brief Check whether two pw grids can be used together: either they are
!>        the same grid or one references the other
!> \param grida first grid
!> \param gridb second grid
!> \return .TRUE. if the grids are compatible
! **************************************************************************************************
   ELEMENTAL FUNCTION pw_compatible(grida, gridb) RESULT(compat)

      TYPE(pw_grid_type), INTENT(IN)                     :: grida, gridb
      LOGICAL                                            :: compat

      compat = (grida%id_nr == gridb%id_nr) .OR. &
               (grida%reference == gridb%id_nr) .OR. &
               (gridb%reference == grida%id_nr)

   END FUNCTION pw_compatible
10484
! **************************************************************************************************
!> \brief Calculate the structure factor exp(-i G.r) for point r on the
!>        g-vector grid of sf
!> \param sf g-space function receiving the structure factor
!> \param r Cartesian position
!> \par History
!>      none
!> \author JGH (05-May-2006)
!> \note
! **************************************************************************************************
   SUBROUTINE pw_structure_factor(sf, r)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: sf
      REAL(KIND=dp), DIMENSION(:), INTENT(IN)            :: r

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_structure_factor'

      INTEGER                                            :: handle, ig, npts
      REAL(KIND=dp)                                      :: gr

      CALL timeset(routineN, handle)

      npts = SIZE(sf%array)

!$OMP PARALLEL DO PRIVATE(ig, gr) DEFAULT(NONE) SHARED(npts, r, sf)
      DO ig = 1, npts
         gr = DOT_PRODUCT(sf%pw_grid%g(:, ig), r)
         ! exp(-i*gr) written out via Euler's formula
         sf%array(ig) = CMPLX(COS(gr), -SIN(gr), KIND=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_structure_factor
10518
10519 END MODULE pw_methods
static GRID_HOST_DEVICE int idx(const orbital a)
Return coset index of given orbital angular momentum.
various routines to log and control the output. The idea is that decisions about where to log should ...
integer function, public cp_logger_get_default_io_unit(logger)
returns the unit nr for the ionode (-1 on all other processors) skips as well checks if the procs cal...
integer, parameter, public bwfft
Definition fft_tools.F:146
integer, parameter, public fwfft
Definition fft_tools.F:146
sums arrays of real/complex numbers with much reduced round-off as compared to a naive implementation...
Definition kahan_sum.F:29
Defines the basic variable types.
Definition kinds.F:23
integer, parameter, public dp
Definition kinds.F:34
Machine interface based on Fortran 2003 and POSIX.
Definition machine.F:17
subroutine, public m_memory(mem)
Returns the total amount of memory [bytes] in use, if known, zero otherwise.
Definition machine.F:456
Definition of mathematical constants and functions.
complex(kind=dp), parameter, public gaussi
complex(kind=dp), parameter, public z_zero
subroutine, public pw_copy_match(pw1, pw2)
copy a pw type variable
Definition pw_copy_all.F:43
integer function, public pw_fpga_init_bitstream(n)
Invoke the pw_fpga_check_bitstream C function passing the path to the data dir.
Definition pw_fpga.F:263
subroutine, public pw_fpga_r3dc1d_3d_dp(n, c_out)
perform an in-place double precision fft3d on the FPGA
Definition pw_fpga.F:146
subroutine, public pw_fpga_r3dc1d_3d_sp(n, c_out)
perform an in-place single precision fft3d on the FPGA
Definition pw_fpga.F:194
subroutine, public pw_fpga_c1dr3d_3d_dp(n, c_out)
perform an in-place double precision inverse fft3d on the FPGA
Definition pw_fpga.F:170
subroutine, public pw_fpga_c1dr3d_3d_sp(n, c_out)
perform an in-place single precision inverse fft3d on the FPGA
Definition pw_fpga.F:228
subroutine, public pw_gpu_c1dr3d_3d_ps(pw1, pw2)
perform an parallel scatter followed by a fft on the gpu
Definition pw_gpu.F:380
subroutine, public pw_gpu_c1dr3d_3d(pw1, pw2)
perform an scatter followed by a fft on the gpu
Definition pw_gpu.F:160
subroutine, public pw_gpu_r3dc1d_3d(pw1, pw2)
perform an fft followed by a gather on the gpu
Definition pw_gpu.F:102
subroutine, public pw_gpu_r3dc1d_3d_ps(pw1, pw2)
perform an parallel fft followed by a gather on the gpu
Definition pw_gpu.F:218
integer, parameter, public halfspace
integer, parameter, public pw_mode_local
integer, parameter, public pw_mode_distributed
subroutine, public pw_smoothing(pw, ecut, sigma)
Multiplies a G-space function with a smoothing factor of the form f(|G|) = exp((ecut - G^2)/sigma)/(1...
subroutine, public pw_log_deriv_trunc(pw, rcutoff)
Multiply all data points with the logarithmic derivative of the complementary cosine This function is...
subroutine, public pw_gauss_damp(pw, omega)
Multiply all data points with a Gaussian damping factor Needed for longrange Coulomb potential V(\vec...
subroutine, public pw_log_deriv_compl_gauss(pw, omega)
Multiply all data points with the logarithmic derivative of the complementary Gaussian damping factor...
subroutine, public pw_dr2_gg(pw, pwdr2_gg, i, j)
Calculate the tensorial 2nd derivative of a plane wave vector and divides by |G|^2....
subroutine, public pw_laplace(pw)
Calculate the Laplacian of a plane wave vector.
subroutine, public pw_log_deriv_gauss(pw, omega)
Multiply all data points with the logarithmic derivative of a Gaussian.
subroutine, public pw_gauss_damp_mix(pw, omega, scale_coul, scale_long)
Multiply all data points with a Gaussian damping factor and mixes it with the original function Neede...
subroutine, public pw_truncated(pw, rcutoff)
Multiply all data points with a complementary cosine Needed for truncated Coulomb potential V(\vec r)...
integer, parameter, public do_accurate_sum
Definition pw_methods.F:80
integer, parameter, public do_standard_sum
Definition pw_methods.F:80
subroutine, public pw_derive(pw, n)
Calculate the derivative of a plane wave vector.
subroutine, public pw_structure_factor(sf, r)
Calculate the structure factor for point r.
subroutine, public pw_compl_gauss_damp(pw, omega)
Multiply all data points with a Gaussian damping factor Needed for longrange Coulomb potential V(\vec...
subroutine, public pw_log_deriv_mix_cl(pw, omega, scale_coul, scale_long)
Multiply all data points with the logarithmic derivative of the mixed longrange/Coulomb potential Nee...
subroutine, public pw_dr2(pw, pwdr2, i, j)
Calculate the tensorial 2nd derivative of a plane wave vector.