(git:d18deda)
Loading...
Searching...
No Matches
pw_methods.F
Go to the documentation of this file.
1!--------------------------------------------------------------------------------------------------!
2! CP2K: A general program to perform molecular dynamics simulations !
3! Copyright 2000-2025 CP2K developers group <https://cp2k.org> !
4! !
5! SPDX-License-Identifier: GPL-2.0-or-later !
6!--------------------------------------------------------------------------------------------------!
7
8! **************************************************************************************************
9!> \note
10!> If parallel mode is distributed certain combination of
11!> "in_use" and "in_space" can not be used.
12!> For performance reasons it would be better to have the loops
13!> over g-vectors in the gather/scatter routines in new subprograms
14!> with the actual arrays (also the addressing) in the parameter list
15!> \par History
16!> JGH (29-Dec-2000) : Changes for parallel use
17!> JGH (13-Mar-2001) : added timing calls
18!> JGH (26-Feb-2003) : OpenMP enabled
19!> JGH (17-Nov-2007) : Removed mass arrays
20!> JGH (01-Dec-2007) : Removed and renamed routines
21!> JGH (04-Jul-2019) : added pw_multiply routine
22!> 03.2008 [tlaino] : Splitting pw_types into pw_types and pw_methods
23!> \author apsi
24! **************************************************************************************************
26
27
30 USE fft_tools, ONLY: bwfft, &
31 fwfft, &
32 fft3d
35 USE kinds, ONLY: dp
36 USE machine, ONLY: m_memory
37 USE mathconstants, ONLY: z_zero
38 USE pw_copy_all, ONLY: pw_copy_match
39 USE pw_fpga, ONLY: pw_fpga_c1dr3d_3d_dp, &
44 USE pw_gpu, ONLY: pw_gpu_c1dr3d_3d, &
48 USE pw_grid_types, ONLY: halfspace, &
52 USE pw_types, ONLY: pw_r1d_rs_type
53 USE pw_types, ONLY: pw_r3d_rs_type
54 USE pw_types, ONLY: pw_c1d_rs_type
55 USE pw_types, ONLY: pw_c3d_rs_type
56 USE pw_types, ONLY: pw_r1d_gs_type
57 USE pw_types, ONLY: pw_r3d_gs_type
58 USE pw_types, ONLY: pw_c1d_gs_type
59 USE pw_types, ONLY: pw_c3d_gs_type
60#include "../base/base_uses.f90"
61
62 IMPLICIT NONE
63
64 PRIVATE
65
73 PUBLIC :: pw_set, pw_truncated
74 PUBLIC :: pw_scatter, pw_gather
76
77 CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'pw_methods'
78 LOGICAL, PARAMETER, PRIVATE :: debug_this_module = .false.
79 INTEGER, PARAMETER, PUBLIC :: do_accurate_sum = 0, &
81
82 INTERFACE pw_zero
83 MODULE PROCEDURE pw_zero_r1d_rs
84 MODULE PROCEDURE pw_zero_r3d_rs
85 MODULE PROCEDURE pw_zero_c1d_rs
86 MODULE PROCEDURE pw_zero_c3d_rs
87 MODULE PROCEDURE pw_zero_r1d_gs
88 MODULE PROCEDURE pw_zero_r3d_gs
89 MODULE PROCEDURE pw_zero_c1d_gs
90 MODULE PROCEDURE pw_zero_c3d_gs
91 END INTERFACE
92
93 INTERFACE pw_scale
94 MODULE PROCEDURE pw_scale_r1d_rs
95 MODULE PROCEDURE pw_scale_r3d_rs
96 MODULE PROCEDURE pw_scale_c1d_rs
97 MODULE PROCEDURE pw_scale_c3d_rs
98 MODULE PROCEDURE pw_scale_r1d_gs
99 MODULE PROCEDURE pw_scale_r3d_gs
100 MODULE PROCEDURE pw_scale_c1d_gs
101 MODULE PROCEDURE pw_scale_c3d_gs
102 END INTERFACE
103
104 INTERFACE pw_write
105 MODULE PROCEDURE pw_write_r1d_rs
106 MODULE PROCEDURE pw_write_r3d_rs
107 MODULE PROCEDURE pw_write_c1d_rs
108 MODULE PROCEDURE pw_write_c3d_rs
109 MODULE PROCEDURE pw_write_r1d_gs
110 MODULE PROCEDURE pw_write_r3d_gs
111 MODULE PROCEDURE pw_write_c1d_gs
112 MODULE PROCEDURE pw_write_c3d_gs
113 END INTERFACE
114
116 MODULE PROCEDURE pw_integrate_function_r1d_rs
117 MODULE PROCEDURE pw_integrate_function_r3d_rs
118 MODULE PROCEDURE pw_integrate_function_c1d_rs
119 MODULE PROCEDURE pw_integrate_function_c3d_rs
120 MODULE PROCEDURE pw_integrate_function_r1d_gs
121 MODULE PROCEDURE pw_integrate_function_r3d_gs
122 MODULE PROCEDURE pw_integrate_function_c1d_gs
123 MODULE PROCEDURE pw_integrate_function_c3d_gs
124 END INTERFACE
125
126 INTERFACE pw_set
127 MODULE PROCEDURE pw_set_value_r1d_rs
128 MODULE PROCEDURE pw_zero_r1d_rs
129 MODULE PROCEDURE pw_set_value_r3d_rs
130 MODULE PROCEDURE pw_zero_r3d_rs
131 MODULE PROCEDURE pw_set_value_c1d_rs
132 MODULE PROCEDURE pw_zero_c1d_rs
133 MODULE PROCEDURE pw_set_value_c3d_rs
134 MODULE PROCEDURE pw_zero_c3d_rs
135 MODULE PROCEDURE pw_set_value_r1d_gs
136 MODULE PROCEDURE pw_zero_r1d_gs
137 MODULE PROCEDURE pw_set_value_r3d_gs
138 MODULE PROCEDURE pw_zero_r3d_gs
139 MODULE PROCEDURE pw_set_value_c1d_gs
140 MODULE PROCEDURE pw_zero_c1d_gs
141 MODULE PROCEDURE pw_set_value_c3d_gs
142 MODULE PROCEDURE pw_zero_c3d_gs
143 END INTERFACE
144
145 INTERFACE pw_copy
146 MODULE PROCEDURE pw_copy_r1d_r1d_rs
147 MODULE PROCEDURE pw_copy_r1d_c1d_rs
148 MODULE PROCEDURE pw_copy_r3d_r3d_rs
149 MODULE PROCEDURE pw_copy_r3d_c3d_rs
150 MODULE PROCEDURE pw_copy_c1d_r1d_rs
151 MODULE PROCEDURE pw_copy_c1d_c1d_rs
152 MODULE PROCEDURE pw_copy_c3d_r3d_rs
153 MODULE PROCEDURE pw_copy_c3d_c3d_rs
154 MODULE PROCEDURE pw_copy_r1d_r1d_gs
155 MODULE PROCEDURE pw_copy_r1d_c1d_gs
156 MODULE PROCEDURE pw_copy_r3d_r3d_gs
157 MODULE PROCEDURE pw_copy_r3d_c3d_gs
158 MODULE PROCEDURE pw_copy_c1d_r1d_gs
159 MODULE PROCEDURE pw_copy_c1d_c1d_gs
160 MODULE PROCEDURE pw_copy_c3d_r3d_gs
161 MODULE PROCEDURE pw_copy_c3d_c3d_gs
162 END INTERFACE
163
164 INTERFACE pw_axpy
165 MODULE PROCEDURE pw_axpy_r1d_r1d_rs
166 MODULE PROCEDURE pw_axpy_r1d_c1d_rs
167 MODULE PROCEDURE pw_axpy_r3d_r3d_rs
168 MODULE PROCEDURE pw_axpy_r3d_c3d_rs
169 MODULE PROCEDURE pw_axpy_c1d_r1d_rs
170 MODULE PROCEDURE pw_axpy_c1d_c1d_rs
171 MODULE PROCEDURE pw_axpy_c3d_r3d_rs
172 MODULE PROCEDURE pw_axpy_c3d_c3d_rs
173 MODULE PROCEDURE pw_axpy_r1d_r1d_gs
174 MODULE PROCEDURE pw_axpy_r1d_c1d_gs
175 MODULE PROCEDURE pw_axpy_r3d_r3d_gs
176 MODULE PROCEDURE pw_axpy_r3d_c3d_gs
177 MODULE PROCEDURE pw_axpy_c1d_r1d_gs
178 MODULE PROCEDURE pw_axpy_c1d_c1d_gs
179 MODULE PROCEDURE pw_axpy_c3d_r3d_gs
180 MODULE PROCEDURE pw_axpy_c3d_c3d_gs
181 END INTERFACE
182
183 INTERFACE pw_multiply
184 MODULE PROCEDURE pw_multiply_r1d_r1d_rs
185 MODULE PROCEDURE pw_multiply_r1d_c1d_rs
186 MODULE PROCEDURE pw_multiply_r3d_r3d_rs
187 MODULE PROCEDURE pw_multiply_r3d_c3d_rs
188 MODULE PROCEDURE pw_multiply_c1d_r1d_rs
189 MODULE PROCEDURE pw_multiply_c1d_c1d_rs
190 MODULE PROCEDURE pw_multiply_c3d_r3d_rs
191 MODULE PROCEDURE pw_multiply_c3d_c3d_rs
192 MODULE PROCEDURE pw_multiply_r1d_r1d_gs
193 MODULE PROCEDURE pw_multiply_r1d_c1d_gs
194 MODULE PROCEDURE pw_multiply_r3d_r3d_gs
195 MODULE PROCEDURE pw_multiply_r3d_c3d_gs
196 MODULE PROCEDURE pw_multiply_c1d_r1d_gs
197 MODULE PROCEDURE pw_multiply_c1d_c1d_gs
198 MODULE PROCEDURE pw_multiply_c3d_r3d_gs
199 MODULE PROCEDURE pw_multiply_c3d_c3d_gs
200 END INTERFACE
201
203 MODULE PROCEDURE pw_multiply_with_r1d_r1d_rs
204 MODULE PROCEDURE pw_multiply_with_r1d_c1d_rs
205 MODULE PROCEDURE pw_multiply_with_r3d_r3d_rs
206 MODULE PROCEDURE pw_multiply_with_r3d_c3d_rs
207 MODULE PROCEDURE pw_multiply_with_c1d_r1d_rs
208 MODULE PROCEDURE pw_multiply_with_c1d_c1d_rs
209 MODULE PROCEDURE pw_multiply_with_c3d_r3d_rs
210 MODULE PROCEDURE pw_multiply_with_c3d_c3d_rs
211 MODULE PROCEDURE pw_multiply_with_r1d_r1d_gs
212 MODULE PROCEDURE pw_multiply_with_r1d_c1d_gs
213 MODULE PROCEDURE pw_multiply_with_r3d_r3d_gs
214 MODULE PROCEDURE pw_multiply_with_r3d_c3d_gs
215 MODULE PROCEDURE pw_multiply_with_c1d_r1d_gs
216 MODULE PROCEDURE pw_multiply_with_c1d_c1d_gs
217 MODULE PROCEDURE pw_multiply_with_c3d_r3d_gs
218 MODULE PROCEDURE pw_multiply_with_c3d_c3d_gs
219 END INTERFACE
220
222 MODULE PROCEDURE pw_integral_ab_r1d_r1d_rs
223 MODULE PROCEDURE pw_integral_ab_r1d_c1d_rs
224 MODULE PROCEDURE pw_integral_ab_r3d_r3d_rs
225 MODULE PROCEDURE pw_integral_ab_r3d_c3d_rs
226 MODULE PROCEDURE pw_integral_ab_c1d_r1d_rs
227 MODULE PROCEDURE pw_integral_ab_c1d_c1d_rs
228 MODULE PROCEDURE pw_integral_ab_c3d_r3d_rs
229 MODULE PROCEDURE pw_integral_ab_c3d_c3d_rs
230 MODULE PROCEDURE pw_integral_ab_r1d_r1d_gs
231 MODULE PROCEDURE pw_integral_ab_r1d_c1d_gs
232 MODULE PROCEDURE pw_integral_ab_r3d_r3d_gs
233 MODULE PROCEDURE pw_integral_ab_r3d_c3d_gs
234 MODULE PROCEDURE pw_integral_ab_c1d_r1d_gs
235 MODULE PROCEDURE pw_integral_ab_c1d_c1d_gs
236 MODULE PROCEDURE pw_integral_ab_c3d_r3d_gs
237 MODULE PROCEDURE pw_integral_ab_c3d_c3d_gs
238 END INTERFACE
239
241 MODULE PROCEDURE pw_integral_a2b_r1d_r1d
242 MODULE PROCEDURE pw_integral_a2b_r1d_c1d
243 MODULE PROCEDURE pw_integral_a2b_c1d_r1d
244 MODULE PROCEDURE pw_integral_a2b_c1d_c1d
245 END INTERFACE
246
247 INTERFACE pw_gather
248 MODULE PROCEDURE pw_gather_p_r1d
249 MODULE PROCEDURE pw_gather_p_c1d
250 MODULE PROCEDURE pw_gather_s_r1d_r3d
251 MODULE PROCEDURE pw_gather_s_r1d_c3d
252 MODULE PROCEDURE pw_gather_s_c1d_r3d
253 MODULE PROCEDURE pw_gather_s_c1d_c3d
254 END INTERFACE
255
256 INTERFACE pw_scatter
257 MODULE PROCEDURE pw_scatter_p_r1d
258 MODULE PROCEDURE pw_scatter_p_c1d
259 MODULE PROCEDURE pw_scatter_s_r1d_r3d
260 MODULE PROCEDURE pw_scatter_s_r1d_c3d
261 MODULE PROCEDURE pw_scatter_s_c1d_r3d
262 MODULE PROCEDURE pw_scatter_s_c1d_c3d
263 END INTERFACE
264
266 MODULE PROCEDURE pw_copy_to_array_r1d_r1d_rs
267 MODULE PROCEDURE pw_copy_to_array_r1d_c1d_rs
268 MODULE PROCEDURE pw_copy_to_array_r3d_r3d_rs
269 MODULE PROCEDURE pw_copy_to_array_r3d_c3d_rs
270 MODULE PROCEDURE pw_copy_to_array_c1d_r1d_rs
271 MODULE PROCEDURE pw_copy_to_array_c1d_c1d_rs
272 MODULE PROCEDURE pw_copy_to_array_c3d_r3d_rs
273 MODULE PROCEDURE pw_copy_to_array_c3d_c3d_rs
274 MODULE PROCEDURE pw_copy_to_array_r1d_r1d_gs
275 MODULE PROCEDURE pw_copy_to_array_r1d_c1d_gs
276 MODULE PROCEDURE pw_copy_to_array_r3d_r3d_gs
277 MODULE PROCEDURE pw_copy_to_array_r3d_c3d_gs
278 MODULE PROCEDURE pw_copy_to_array_c1d_r1d_gs
279 MODULE PROCEDURE pw_copy_to_array_c1d_c1d_gs
280 MODULE PROCEDURE pw_copy_to_array_c3d_r3d_gs
281 MODULE PROCEDURE pw_copy_to_array_c3d_c3d_gs
282 END INTERFACE
283
285 MODULE PROCEDURE pw_copy_from_array_r1d_r1d_rs
286 MODULE PROCEDURE pw_copy_from_array_r1d_c1d_rs
287 MODULE PROCEDURE pw_copy_from_array_r3d_r3d_rs
288 MODULE PROCEDURE pw_copy_from_array_r3d_c3d_rs
289 MODULE PROCEDURE pw_copy_from_array_c1d_r1d_rs
290 MODULE PROCEDURE pw_copy_from_array_c1d_c1d_rs
291 MODULE PROCEDURE pw_copy_from_array_c3d_r3d_rs
292 MODULE PROCEDURE pw_copy_from_array_c3d_c3d_rs
293 MODULE PROCEDURE pw_copy_from_array_r1d_r1d_gs
294 MODULE PROCEDURE pw_copy_from_array_r1d_c1d_gs
295 MODULE PROCEDURE pw_copy_from_array_r3d_r3d_gs
296 MODULE PROCEDURE pw_copy_from_array_r3d_c3d_gs
297 MODULE PROCEDURE pw_copy_from_array_c1d_r1d_gs
298 MODULE PROCEDURE pw_copy_from_array_c1d_c1d_gs
299 MODULE PROCEDURE pw_copy_from_array_c3d_r3d_gs
300 MODULE PROCEDURE pw_copy_from_array_c3d_c3d_gs
301 END INTERFACE
302
303 INTERFACE pw_transfer
304 MODULE PROCEDURE pw_copy_r1d_r1d_rs
305 MODULE PROCEDURE pw_copy_r1d_r1d_gs
306 MODULE PROCEDURE pw_gather_s_r1d_r3d_2
307 MODULE PROCEDURE pw_scatter_s_r1d_r3d_2
308 MODULE PROCEDURE pw_copy_r1d_c1d_rs
309 MODULE PROCEDURE pw_copy_r1d_c1d_gs
310 MODULE PROCEDURE pw_gather_s_r1d_c3d_2
311 MODULE PROCEDURE pw_scatter_s_r1d_c3d_2
312 MODULE PROCEDURE pw_copy_r3d_r3d_rs
313 MODULE PROCEDURE pw_copy_r3d_r3d_gs
314 MODULE PROCEDURE fft_wrap_pw1pw2_r3d_c1d_rs_gs
315 MODULE PROCEDURE pw_copy_r3d_c3d_rs
316 MODULE PROCEDURE pw_copy_r3d_c3d_gs
317 MODULE PROCEDURE fft_wrap_pw1pw2_r3d_c3d_rs_gs
318 MODULE PROCEDURE pw_copy_c1d_r1d_rs
319 MODULE PROCEDURE pw_copy_c1d_r1d_gs
320 MODULE PROCEDURE pw_gather_s_c1d_r3d_2
321 MODULE PROCEDURE pw_scatter_s_c1d_r3d_2
322 MODULE PROCEDURE fft_wrap_pw1pw2_c1d_r3d_gs_rs
323 MODULE PROCEDURE pw_copy_c1d_c1d_rs
324 MODULE PROCEDURE pw_copy_c1d_c1d_gs
325 MODULE PROCEDURE pw_gather_s_c1d_c3d_2
326 MODULE PROCEDURE pw_scatter_s_c1d_c3d_2
327 MODULE PROCEDURE fft_wrap_pw1pw2_c1d_c3d_gs_rs
328 MODULE PROCEDURE pw_copy_c3d_r3d_rs
329 MODULE PROCEDURE pw_copy_c3d_r3d_gs
330 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_r3d_gs_rs
331 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c1d_rs_gs
332 MODULE PROCEDURE pw_copy_c3d_c3d_rs
333 MODULE PROCEDURE pw_copy_c3d_c3d_gs
334 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c3d_rs_gs
335 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c3d_gs_rs
336 END INTERFACE
337
338CONTAINS
! **************************************************************************************************
!> \brief Sets every coefficient of the given pw data to zero.
!> \param pw the plane-wave data to clear in place
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r1d_rs (pw)

      TYPE(pw_r1d_rs_type), INTENT(INOUT)      :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! NOTE(review): WORKSHARE is bypassed for Intel LLVM compilers older than
      ! 2025.0 — presumably a compiler workaround, confirm before removing
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_zero_r1d_rs
367
! **************************************************************************************************
!> \brief Scales all pw coefficients in place by a constant factor.
!> \param pw the plane-wave data to scale
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r1d_rs (pw, a)

      TYPE(pw_r1d_rs_type), INTENT(INOUT)      :: pw
      REAL(KIND=dp), INTENT(IN)                :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_scale_r1d_rs
393
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_r1d_rs (pw, unit_nr)

      TYPE(pw_r1d_rs_type), INTENT(in)         :: pw
      INTEGER, INTENT(in)                      :: unit_nr

      INTEGER :: iostatus

      ! Best-effort output: iostatus is captured but deliberately never checked.
      ! Every WRITE now carries iostat so a bad unit cannot abort midway
      ! (previously the last three WRITEs were unguarded).
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r1d"
      IF (ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,')>')", iostat=iostatus) &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')", iostat=iostatus)
      END IF

   END SUBROUTINE pw_write_r1d_rs
422
! **************************************************************************************************
!> \brief Integrates the real-space function over the grid, optionally of its
!>        absolute value, with a parallel reduction for distributed grids.
!> \param fun the pw data to integrate
!> \param isign if present, the result is multiplied by the sign of isign
!> \param oprt optional operator name; only "ABS"/"abs" is recognized
!> \return dvol times the (accurate) sum of fun (or |fun|) over all ranks
! **************************************************************************************************
   FUNCTION pw_integrate_function_r1d_rs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_r1d_rs_type), INTENT(IN)         :: fun
      INTEGER, INTENT(IN), OPTIONAL            :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL   :: oprt
      REAL(kind=dp)                            :: total_fun

      LOGICAL :: take_abs

      take_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         IF (oprt == "ABS" .OR. oprt == "abs") THEN
            take_abs = .TRUE.
         ELSE
            cpabort("Unknown operator")
         END IF
      END IF

      ! reduction using maximum accuracy, scaled by the volume element
      IF (take_abs) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(ABS(fun%array))
      ELSE
         total_fun = fun%pw_grid%dvol*accurate_sum(fun%array)
      END IF

      ! combine the partial sums of all ranks for non-local grids
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*SIGN(1._dp, REAL(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_r1d_rs
468
! **************************************************************************************************
!> \brief Fills the pw data array with a constant value.
!> \param pw the pw type whose coefficients are set
!> \param value the value assigned to every coefficient
! **************************************************************************************************
   SUBROUTINE pw_set_value_r1d_rs (pw, value)
      TYPE(pw_r1d_rs_type), INTENT(IN)         :: pw
      REAL(KIND=dp), INTENT(IN)                :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, value)
      pw%array = value
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_set_value_r1d_rs
! **************************************************************************************************
!> \brief Sets every coefficient of the given pw data to zero.
!> \param pw the plane-wave data to clear in place
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r1d_gs (pw)

      TYPE(pw_r1d_gs_type), INTENT(INOUT)      :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! NOTE(review): WORKSHARE is bypassed for Intel LLVM compilers older than
      ! 2025.0 — presumably a compiler workaround, confirm before removing
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_zero_r1d_gs
519
! **************************************************************************************************
!> \brief Scales all pw coefficients in place by a constant factor.
!> \param pw the plane-wave data to scale
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r1d_gs (pw, a)

      TYPE(pw_r1d_gs_type), INTENT(INOUT)      :: pw
      REAL(KIND=dp), INTENT(IN)                :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_scale_r1d_gs
545
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_r1d_gs (pw, unit_nr)

      TYPE(pw_r1d_gs_type), INTENT(in)         :: pw
      INTEGER, INTENT(in)                      :: unit_nr

      INTEGER :: iostatus

      ! Best-effort output: iostatus is captured but deliberately never checked.
      ! Every WRITE now carries iostat so a bad unit cannot abort midway
      ! (previously the last three WRITEs were unguarded).
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r1d"
      IF (ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,')>')", iostat=iostatus) &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')", iostat=iostatus)
      END IF

   END SUBROUTINE pw_write_r1d_gs
574
! **************************************************************************************************
!> \brief Integrates a g-space function: the integral equals the volume times
!>        the g=0 coefficient, reduced over all ranks for distributed grids.
!> \param fun the pw data to integrate
!> \param isign if present, the result is multiplied by the sign of isign
!> \param oprt optional operator name; "ABS"/"abs" is recognized but aborts,
!>             since |f| is not available in g-space
!> \return vol*fun(g=0) summed over the parallel group
! **************************************************************************************************
   FUNCTION pw_integrate_function_r1d_gs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_r1d_gs_type), INTENT(IN)         :: fun
      INTEGER, INTENT(IN), OPTIONAL            :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL   :: oprt
      REAL(kind=dp)                            :: total_fun

      LOGICAL :: want_abs

      want_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         IF (oprt == "ABS" .OR. oprt == "abs") THEN
            want_abs = .TRUE.
         ELSE
            cpabort("Unknown operator")
         END IF
      END IF

      IF (want_abs) &
         cpabort("Operator ABS not implemented")

      ! only the rank that holds the g=0 component contributes
      total_fun = 0.0_dp
      IF (fun%pw_grid%have_g0) total_fun = fun%pw_grid%vol*fun%array(1)

      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*SIGN(1._dp, REAL(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_r1d_gs
617
! **************************************************************************************************
!> \brief Fills the pw data array with a constant value.
!> \param pw the pw type whose coefficients are set
!> \param value the value assigned to every coefficient
! **************************************************************************************************
   SUBROUTINE pw_set_value_r1d_gs (pw, value)
      TYPE(pw_r1d_gs_type), INTENT(IN)         :: pw
      REAL(KIND=dp), INTENT(IN)                :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, value)
      pw%array = value
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_set_value_r1d_gs
640
! **************************************************************************************************
!> \brief Gathers the distributed g-space coefficients from the 2d FFT work
!>        array c into the linear coefficient array pw%array, optionally
!>        scaling each value. Only valid for distributed grids.
!> \param pw the (distributed) g-space pw whose 1d array is filled
!> \param c FFT work array; first index is the x-line position, second the
!>          packed (y,z) index — assumed layout, confirm against fft_tools
!> \param scale if present, every gathered coefficient is multiplied by it
! **************************************************************************************************
   SUBROUTINE pw_gather_p_r1d (pw, c, scale)

      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gather_p'

      INTEGER :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! mapl/mapm/mapn translate the integer g-vector components (ghat) into
      ! 0-based positions; yzq packs the (m,n) pair into the second index of c
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq)

         ! the scaled and unscaled loops are duplicated so the PRESENT(scale)
         ! test stays outside the hot loop
         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               ! only the real part is kept: pw holds real (r1d) coefficients
               pw%array(gpt) = scale* real(c(l, mn), kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = real(c(l, mn), kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_p_r1d
697
! **************************************************************************************************
!> \brief Scatters the linear g-space coefficients pw%array into the 2d FFT
!>        work array c, optionally scaling. For halfspace grids a second pass
!>        also fills the mirrored (-g) positions. Only valid for distributed
!>        grids.
!> \param pw the (distributed) g-space pw supplying the coefficients
!> \param c FFT work array to fill; zeroed first unless scale is present
!> \param scale if present, every scattered coefficient is multiplied by it
! **************************************************************************************************
   SUBROUTINE pw_scatter_p_r1d (pw, c, scale)
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scatter_p'

      INTEGER :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! positive-g pass: pos maps translate g-vector components into indices
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq, ngpts => SIZE(pw%pw_grid%gsq))

         ! without scale the whole target is cleared first; with scale the
         ! caller's existing contents of c outside the written points survive
         IF (.NOT. PRESENT(scale)) c = z_zero

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               ! real coefficients become purely real complex entries
               c(l, mn) = cmplx(scale*pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = cmplx(pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! halfspace grids store only half the g-vectors; the neg maps address the
      ! mirrored -g positions, which for real data carry the same (real) value
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, mapl => pw%pw_grid%mapl%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq), yzq => pw%pw_grid%para%yzq)

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = scale*( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = ( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            END IF
         END associate
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_p_r1d
! **************************************************************************************************
!> \brief Sets every coefficient of the given pw data to zero.
!> \param pw the plane-wave data to clear in place
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r3d_rs (pw)

      TYPE(pw_r3d_rs_type), INTENT(INOUT)      :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! NOTE(review): WORKSHARE is bypassed for Intel LLVM compilers older than
      ! 2025.0 — presumably a compiler workaround, confirm before removing
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_zero_r3d_rs
816
! **************************************************************************************************
!> \brief Scales all pw coefficients in place by a constant factor.
!> \param pw the plane-wave data to scale
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r3d_rs (pw, a)

      TYPE(pw_r3d_rs_type), INTENT(INOUT)      :: pw
      REAL(KIND=dp), INTENT(IN)                :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_scale_r3d_rs
842
! **************************************************************************************************
!> \brief writes a small description of the actual grid
!>        (change to output the data as cube file, maybe with an
!>        optional long_description arg?)
!> \param pw the pw data to output
!> \param unit_nr the unit to output to
!> \par History
!>      08.2002 created [fawzi]
!> \author Fawzi Mohamed
! **************************************************************************************************
   SUBROUTINE pw_write_r3d_rs (pw, unit_nr)

      TYPE(pw_r3d_rs_type), INTENT(in)         :: pw
      INTEGER, INTENT(in)                      :: unit_nr

      INTEGER :: iostatus

      ! Best-effort output: iostatus is captured but deliberately never checked.
      ! Every WRITE now carries iostat so a bad unit cannot abort midway
      ! (previously the last three WRITEs were unguarded).
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r3d"
      IF (ASSOCIATED(pw%array)) THEN
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')", iostat=iostatus) &
            LBOUND(pw%array, 1), UBOUND(pw%array, 1), LBOUND(pw%array, 2), UBOUND(pw%array, 2), &
            LBOUND(pw%array, 3), UBOUND(pw%array, 3)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')", iostat=iostatus)
      END IF

   END SUBROUTINE pw_write_r3d_rs
872
! **************************************************************************************************
!> \brief Integrates the real-space function over the grid, optionally of its
!>        absolute value, with a parallel reduction for distributed grids.
!> \param fun the pw data to integrate
!> \param isign if present, the result is multiplied by the sign of isign
!> \param oprt optional operator name; only "ABS"/"abs" is recognized
!> \return dvol times the (accurate) sum of fun (or |fun|) over all ranks
! **************************************************************************************************
   FUNCTION pw_integrate_function_r3d_rs (fun, isign, oprt) RESULT(total_fun)

      TYPE(pw_r3d_rs_type), INTENT(IN)         :: fun
      INTEGER, INTENT(IN), OPTIONAL            :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL   :: oprt
      REAL(kind=dp)                            :: total_fun

      LOGICAL :: take_abs

      take_abs = .FALSE.
      IF (PRESENT(oprt)) THEN
         IF (oprt == "ABS" .OR. oprt == "abs") THEN
            take_abs = .TRUE.
         ELSE
            cpabort("Unknown operator")
         END IF
      END IF

      ! reduction using maximum accuracy, scaled by the volume element
      IF (take_abs) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(ABS(fun%array))
      ELSE
         total_fun = fun%pw_grid%dvol*accurate_sum(fun%array)
      END IF

      ! combine the partial sums of all ranks for non-local grids
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*SIGN(1._dp, REAL(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_r3d_rs
918
! **************************************************************************************************
!> \brief Fills the pw data array with a constant value.
!> \param pw the pw type whose coefficients are set
!> \param value the value assigned to every coefficient
! **************************************************************************************************
   SUBROUTINE pw_set_value_r3d_rs (pw, value)
      TYPE(pw_r3d_rs_type), INTENT(IN)         :: pw
      REAL(KIND=dp), INTENT(IN)                :: value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, value)
      pw%array = value
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_set_value_r3d_rs
! **************************************************************************************************
!> \brief Sets every coefficient of the given pw data to zero.
!> \param pw the plane-wave data to clear in place
!> \author apsi
! **************************************************************************************************
   SUBROUTINE pw_zero_r3d_gs (pw)

      TYPE(pw_r3d_gs_type), INTENT(INOUT)      :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! NOTE(review): WORKSHARE is bypassed for Intel LLVM compilers older than
      ! 2025.0 — presumably a compiler workaround, confirm before removing
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
      pw%array = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#else
      pw%array = 0.0_dp
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_zero_r3d_gs
969
! **************************************************************************************************
!> \brief Scales all pw coefficients in place by a constant factor.
!> \param pw the plane-wave data to scale
!> \param a the scaling factor
!> \par History
!>      11.2004 created [Joost VandeVondele]
! **************************************************************************************************
   SUBROUTINE pw_scale_r3d_gs (pw, a)

      TYPE(pw_r3d_gs_type), INTENT(INOUT)      :: pw
      REAL(KIND=dp), INTENT(IN)                :: a

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a, pw)
      pw%array = pw%array*a
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_scale_r3d_gs
995
996! **************************************************************************************************
997!> \brief writes a small description of the actual grid
998!> (change to output the data as cube file, maybe with an
999!> optional long_description arg?)
1000!> \param pw the pw data to output
1001!> \param unit_nr the unit to output to
1002!> \par History
1003!> 08.2002 created [fawzi]
1004!> \author Fawzi Mohamed
1005! **************************************************************************************************
   SUBROUTINE pw_write_r3d_gs (pw, unit_nr)

      ! Write a short, human-readable description of the grid data to unit_nr.
      TYPE(pw_r3d_gs_type), INTENT(in) :: pw
      INTEGER, INTENT(in) :: unit_nr

      INTEGER :: iostatus

      ! NOTE(review): iostatus is captured but never inspected, so write errors
      ! are silently ignored; the bounds/null WRITEs below do not pass iostat at
      ! all. Presumably deliberate best-effort debug output -- confirm.
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r3d"
      IF (ASSOCIATED(pw%array)) THEN
         ! print the lower/upper bounds of each of the three dimensions
         WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
            lbound(pw%array, 1), ubound(pw%array, 1), lbound(pw%array, 2), ubound(pw%array, 2), &
            lbound(pw%array, 3), ubound(pw%array, 3)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_r3d_gs
1025
1026! **************************************************************************************************
1027!> \brief ...
1028!> \param fun ...
1029!> \param isign ...
1030!> \param oprt ...
1031!> \return ...
1032! **************************************************************************************************
   FUNCTION pw_integrate_function_r3d_gs (fun, isign, oprt) RESULT(total_fun)

      ! Integral of a function on a real 3d reciprocal-space grid.
      ! NOTE(review): this variant is NOT implemented -- it unconditionally
      ! aborts below after validating its arguments.
      TYPE(pw_r3d_gs_type), INTENT(IN) :: fun
      INTEGER, INTENT(IN), OPTIONAL :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
      REAL(kind=dp) :: total_fun

      INTEGER :: iop

      iop = 0

      ! only the "ABS" operator is recognized; anything else aborts
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! unconditional abort: 3d reciprocal-space integration is not implemented.
      ! The statements below are unreachable (cpabort does not return) and are
      ! kept for symmetry with the template-generated sibling routines.
      cpabort("Reciprocal space integration for 3D grids not implemented!")

      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_r3d_gs
1068
1069! **************************************************************************************************
1070!> \brief ...
1071!> \param pw ...
1072!> \param value ...
1073! **************************************************************************************************
1074 SUBROUTINE pw_set_value_r3d_gs (pw, value)
1075 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
1076 REAL(KIND=dp), INTENT(IN) :: value
1077
1078 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1079
1080 INTEGER :: handle
1081
1082 CALL timeset(routinen, handle)
1083
1084!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1085 pw%array = value
1086!$OMP END PARALLEL WORKSHARE
1087
1088 CALL timestop(handle)
1089
1090 END SUBROUTINE pw_set_value_r3d_gs
1091
1092! **************************************************************************************************
1093!> \brief Set values of a pw type to zero
1094!> \param pw ...
1095!> \par History
1096!> none
1097!> \author apsi
1098! **************************************************************************************************
1099 SUBROUTINE pw_zero_c1d_rs (pw)
1100
1101 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw
1102
1103 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1104
1105 INTEGER :: handle
1106
1107 CALL timeset(routinen, handle)
1108
1109#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
1110!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1111 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1112!$OMP END PARALLEL WORKSHARE
1113#else
1114 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1115#endif
1116
1117 CALL timestop(handle)
1118
1119 END SUBROUTINE pw_zero_c1d_rs
1120
1121! **************************************************************************************************
1122!> \brief multiplies pw coeffs with a number
1123!> \param pw ...
1124!> \param a ...
1125!> \par History
1126!> 11.2004 created [Joost VandeVondele]
1127! **************************************************************************************************
1128 SUBROUTINE pw_scale_c1d_rs (pw, a)
1129
1130 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw
1131 REAL(KIND=dp), INTENT(IN) :: a
1132
1133 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1134
1135 INTEGER :: handle
1136
1137 CALL timeset(routinen, handle)
1138
1139!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1140 pw%array = a*pw%array
1141!$OMP END PARALLEL WORKSHARE
1142
1143 CALL timestop(handle)
1144
1145 END SUBROUTINE pw_scale_c1d_rs
1146
1147! **************************************************************************************************
1148!> \brief writes a small description of the actual grid
1149!> (change to output the data as cube file, maybe with an
1150!> optional long_description arg?)
1151!> \param pw the pw data to output
1152!> \param unit_nr the unit to output to
1153!> \par History
1154!> 08.2002 created [fawzi]
1155!> \author Fawzi Mohamed
1156! **************************************************************************************************
   SUBROUTINE pw_write_c1d_rs (pw, unit_nr)

      ! Write a short, human-readable description of the grid data to unit_nr.
      TYPE(pw_c1d_rs_type), INTENT(in) :: pw
      INTEGER, INTENT(in) :: unit_nr

      INTEGER :: iostatus

      ! NOTE(review): iostatus is captured but never inspected, so write errors
      ! are silently ignored. Presumably best-effort debug output -- confirm.
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c1d"
      IF (ASSOCIATED(pw%array)) THEN
         ! print the bounds of the (associated) 1d data array
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,')>')") &
            lbound(pw%array, 1), ubound(pw%array, 1)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c1d_rs
1175
1176! **************************************************************************************************
1177!> \brief ...
1178!> \param fun ...
1179!> \param isign ...
1180!> \param oprt ...
1181!> \return ...
1182! **************************************************************************************************
1183 FUNCTION pw_integrate_function_c1d_rs (fun, isign, oprt) RESULT(total_fun)
1184
1185 TYPE(pw_c1d_rs_type), INTENT(IN) :: fun
1186 INTEGER, INTENT(IN), OPTIONAL :: isign
1187 CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
1188 REAL(kind=dp) :: total_fun
1189
1190 INTEGER :: iop
1191
1192 iop = 0
1193
1194 IF (PRESENT(oprt)) THEN
1195 SELECT CASE (oprt)
1196 CASE ("ABS", "abs")
1197 iop = 1
1198 CASE DEFAULT
1199 cpabort("Unknown operator")
1200 END SELECT
1201 END IF
1202
1203 total_fun = 0.0_dp
1204
1205 ! do reduction using maximum accuracy
1206 IF (iop == 1) THEN
1207 total_fun = fun%pw_grid%dvol*accurate_sum(abs(fun%array))
1208 ELSE
1209 total_fun = fun%pw_grid%dvol*accurate_sum( real(fun%array, kind=dp))
1210 END IF
1211
1212 IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
1213 CALL fun%pw_grid%para%group%sum(total_fun)
1214 END IF
1215
1216 IF (PRESENT(isign)) THEN
1217 total_fun = total_fun*sign(1._dp, real(isign, dp))
1218 END IF
1219
1220 END FUNCTION pw_integrate_function_c1d_rs
1221
1222! **************************************************************************************************
1223!> \brief ...
1224!> \param pw ...
1225!> \param value ...
1226! **************************************************************************************************
1227 SUBROUTINE pw_set_value_c1d_rs (pw, value)
1228 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
1229 REAL(KIND=dp), INTENT(IN) :: value
1230
1231 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1232
1233 INTEGER :: handle
1234
1235 CALL timeset(routinen, handle)
1236
1237!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1238 pw%array = cmplx(value, 0.0_dp, kind=dp)
1239!$OMP END PARALLEL WORKSHARE
1240
1241 CALL timestop(handle)
1242
1243 END SUBROUTINE pw_set_value_c1d_rs
1244! **************************************************************************************************
1245!> \brief Set values of a pw type to zero
1246!> \param pw ...
1247!> \par History
1248!> none
1249!> \author apsi
1250! **************************************************************************************************
1251 SUBROUTINE pw_zero_c1d_gs (pw)
1252
1253 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
1254
1255 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1256
1257 INTEGER :: handle
1258
1259 CALL timeset(routinen, handle)
1260
1261#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
1262!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1263 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1264!$OMP END PARALLEL WORKSHARE
1265#else
1266 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1267#endif
1268
1269 CALL timestop(handle)
1270
1271 END SUBROUTINE pw_zero_c1d_gs
1272
1273! **************************************************************************************************
1274!> \brief multiplies pw coeffs with a number
1275!> \param pw ...
1276!> \param a ...
1277!> \par History
1278!> 11.2004 created [Joost VandeVondele]
1279! **************************************************************************************************
1280 SUBROUTINE pw_scale_c1d_gs (pw, a)
1281
1282 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
1283 REAL(KIND=dp), INTENT(IN) :: a
1284
1285 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1286
1287 INTEGER :: handle
1288
1289 CALL timeset(routinen, handle)
1290
1291!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1292 pw%array = a*pw%array
1293!$OMP END PARALLEL WORKSHARE
1294
1295 CALL timestop(handle)
1296
1297 END SUBROUTINE pw_scale_c1d_gs
1298
1299! **************************************************************************************************
1300!> \brief writes a small description of the actual grid
1301!> (change to output the data as cube file, maybe with an
1302!> optional long_description arg?)
1303!> \param pw the pw data to output
1304!> \param unit_nr the unit to output to
1305!> \par History
1306!> 08.2002 created [fawzi]
1307!> \author Fawzi Mohamed
1308! **************************************************************************************************
   SUBROUTINE pw_write_c1d_gs (pw, unit_nr)

      ! Write a short, human-readable description of the grid data to unit_nr.
      TYPE(pw_c1d_gs_type), INTENT(in) :: pw
      INTEGER, INTENT(in) :: unit_nr

      INTEGER :: iostatus

      ! NOTE(review): iostatus is captured but never inspected, so write errors
      ! are silently ignored. Presumably best-effort debug output -- confirm.
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c1d"
      IF (ASSOCIATED(pw%array)) THEN
         ! print the bounds of the (associated) 1d data array
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,')>')") &
            lbound(pw%array, 1), ubound(pw%array, 1)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c1d_gs
1327
1328! **************************************************************************************************
1329!> \brief ...
1330!> \param fun ...
1331!> \param isign ...
1332!> \param oprt ...
1333!> \return ...
1334! **************************************************************************************************
1335 FUNCTION pw_integrate_function_c1d_gs (fun, isign, oprt) RESULT(total_fun)
1336
1337 TYPE(pw_c1d_gs_type), INTENT(IN) :: fun
1338 INTEGER, INTENT(IN), OPTIONAL :: isign
1339 CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
1340 REAL(kind=dp) :: total_fun
1341
1342 INTEGER :: iop
1343
1344 iop = 0
1345
1346 IF (PRESENT(oprt)) THEN
1347 SELECT CASE (oprt)
1348 CASE ("ABS", "abs")
1349 iop = 1
1350 CASE DEFAULT
1351 cpabort("Unknown operator")
1352 END SELECT
1353 END IF
1354
1355 total_fun = 0.0_dp
1356
1357 IF (iop == 1) &
1358 cpabort("Operator ABS not implemented")
1359 IF (fun%pw_grid%have_g0) total_fun = fun%pw_grid%vol* real(fun%array(1), kind=dp)
1360
1361 IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
1362 CALL fun%pw_grid%para%group%sum(total_fun)
1363 END IF
1364
1365 IF (PRESENT(isign)) THEN
1366 total_fun = total_fun*sign(1._dp, real(isign, dp))
1367 END IF
1368
1369 END FUNCTION pw_integrate_function_c1d_gs
1370
1371! **************************************************************************************************
1372!> \brief ...
1373!> \param pw ...
1374!> \param value ...
1375! **************************************************************************************************
1376 SUBROUTINE pw_set_value_c1d_gs (pw, value)
1377 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
1378 REAL(KIND=dp), INTENT(IN) :: value
1379
1380 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1381
1382 INTEGER :: handle
1383
1384 CALL timeset(routinen, handle)
1385
1386!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1387 pw%array = cmplx(value, 0.0_dp, kind=dp)
1388!$OMP END PARALLEL WORKSHARE
1389
1390 CALL timestop(handle)
1391
1392 END SUBROUTINE pw_set_value_c1d_gs
1393
1394! **************************************************************************************************
1395!> \brief ...
1396!> \param pw ...
1397!> \param c ...
1398!> \param scale ...
1399! **************************************************************************************************
   SUBROUTINE pw_gather_p_c1d (pw, c, scale)

      ! Gather the g-vector coefficients of a distributed grid out of the FFT
      ! scratch array c into the linear pw%array, optionally scaling them.
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      ! c: FFT work array indexed as (l, mn); mn combines the (m, n) plane
      ! coordinates through the yzq lookup table
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gather_p'

      INTEGER :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      ! this routine only makes sense for ray-distributed grids
      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq)

         ! The scaled and unscaled loops are duplicated so that the PRESENT()
         ! test stays outside the hot loop. Each g-point index gpt is mapped
         ! through g_hat and the mapl/mapm/mapn position tables to the
         ! (l, mn) location of its coefficient in c.
         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = scale*c(l, mn)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = c(l, mn)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_p_c1d
1450
1451! **************************************************************************************************
1452!> \brief ...
1453!> \param pw ...
1454!> \param c ...
1455!> \param scale ...
1456! **************************************************************************************************
   SUBROUTINE pw_scatter_p_c1d (pw, c, scale)
      ! Scatter the linear coefficient array of a distributed grid back into
      ! the FFT scratch array c (inverse of pw_gather_p_c1d). For half-space
      ! grids the -g coefficients are reconstructed as complex conjugates.
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      ! c: FFT work array indexed as (l, mn); mn combines the (m, n) plane
      ! coordinates through the yzq lookup table
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scatter_p'

      INTEGER :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      ! this routine only makes sense for ray-distributed grids
      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq, ngpts => SIZE(pw%pw_grid%gsq))

         ! without a scale factor the whole target is cleared first, since the
         ! scatter only touches the locations belonging to known g-vectors.
         ! NOTE(review): when scale IS present, c is deliberately not zeroed --
         ! presumably the caller accumulates into it; confirm against callers.
         IF (.NOT. PRESENT(scale)) c = z_zero

         ! scaled and unscaled loops duplicated to keep PRESENT() out of the hot loop
         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = scale*pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! half-space grids store only +g; fill the -g locations (via the %neg
      ! maps) with the conjugate coefficients so c describes a real field
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, mapl => pw%pw_grid%mapl%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq), yzq => pw%pw_grid%para%yzq)

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = scale*conjg(pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = conjg(pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            END IF
         END associate
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_p_c1d
1541! **************************************************************************************************
1542!> \brief Set values of a pw type to zero
1543!> \param pw ...
1544!> \par History
1545!> none
1546!> \author apsi
1547! **************************************************************************************************
1548 SUBROUTINE pw_zero_c3d_rs (pw)
1549
1550 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw
1551
1552 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1553
1554 INTEGER :: handle
1555
1556 CALL timeset(routinen, handle)
1557
1558#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
1559!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1560 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1561!$OMP END PARALLEL WORKSHARE
1562#else
1563 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1564#endif
1565
1566 CALL timestop(handle)
1567
1568 END SUBROUTINE pw_zero_c3d_rs
1569
1570! **************************************************************************************************
1571!> \brief multiplies pw coeffs with a number
1572!> \param pw ...
1573!> \param a ...
1574!> \par History
1575!> 11.2004 created [Joost VandeVondele]
1576! **************************************************************************************************
1577 SUBROUTINE pw_scale_c3d_rs (pw, a)
1578
1579 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw
1580 REAL(KIND=dp), INTENT(IN) :: a
1581
1582 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1583
1584 INTEGER :: handle
1585
1586 CALL timeset(routinen, handle)
1587
1588!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1589 pw%array = a*pw%array
1590!$OMP END PARALLEL WORKSHARE
1591
1592 CALL timestop(handle)
1593
1594 END SUBROUTINE pw_scale_c3d_rs
1595
1596! **************************************************************************************************
1597!> \brief writes a small description of the actual grid
1598!> (change to output the data as cube file, maybe with an
1599!> optional long_description arg?)
1600!> \param pw the pw data to output
1601!> \param unit_nr the unit to output to
1602!> \par History
1603!> 08.2002 created [fawzi]
1604!> \author Fawzi Mohamed
1605! **************************************************************************************************
   SUBROUTINE pw_write_c3d_rs (pw, unit_nr)

      ! Write a short, human-readable description of the grid data to unit_nr.
      TYPE(pw_c3d_rs_type), INTENT(in) :: pw
      INTEGER, INTENT(in) :: unit_nr

      INTEGER :: iostatus

      ! NOTE(review): iostatus is captured but never inspected, so write errors
      ! are silently ignored. Presumably best-effort debug output -- confirm.
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c3d"
      IF (ASSOCIATED(pw%array)) THEN
         ! print the lower/upper bounds of each of the three dimensions
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
            lbound(pw%array, 1), ubound(pw%array, 1), lbound(pw%array, 2), ubound(pw%array, 2), &
            lbound(pw%array, 3), ubound(pw%array, 3)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c3d_rs
1625
1626! **************************************************************************************************
1627!> \brief ...
1628!> \param fun ...
1629!> \param isign ...
1630!> \param oprt ...
1631!> \return ...
1632! **************************************************************************************************
1633 FUNCTION pw_integrate_function_c3d_rs (fun, isign, oprt) RESULT(total_fun)
1634
1635 TYPE(pw_c3d_rs_type), INTENT(IN) :: fun
1636 INTEGER, INTENT(IN), OPTIONAL :: isign
1637 CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
1638 REAL(kind=dp) :: total_fun
1639
1640 INTEGER :: iop
1641
1642 iop = 0
1643
1644 IF (PRESENT(oprt)) THEN
1645 SELECT CASE (oprt)
1646 CASE ("ABS", "abs")
1647 iop = 1
1648 CASE DEFAULT
1649 cpabort("Unknown operator")
1650 END SELECT
1651 END IF
1652
1653 total_fun = 0.0_dp
1654
1655 ! do reduction using maximum accuracy
1656 IF (iop == 1) THEN
1657 total_fun = fun%pw_grid%dvol*accurate_sum(abs(fun%array))
1658 ELSE
1659 total_fun = fun%pw_grid%dvol*accurate_sum( real(fun%array, kind=dp))
1660 END IF
1661
1662 IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
1663 CALL fun%pw_grid%para%group%sum(total_fun)
1664 END IF
1665
1666 IF (PRESENT(isign)) THEN
1667 total_fun = total_fun*sign(1._dp, real(isign, dp))
1668 END IF
1669
1670 END FUNCTION pw_integrate_function_c3d_rs
1671
1672! **************************************************************************************************
1673!> \brief ...
1674!> \param pw ...
1675!> \param value ...
1676! **************************************************************************************************
1677 SUBROUTINE pw_set_value_c3d_rs (pw, value)
1678 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
1679 REAL(KIND=dp), INTENT(IN) :: value
1680
1681 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1682
1683 INTEGER :: handle
1684
1685 CALL timeset(routinen, handle)
1686
1687!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1688 pw%array = cmplx(value, 0.0_dp, kind=dp)
1689!$OMP END PARALLEL WORKSHARE
1690
1691 CALL timestop(handle)
1692
1693 END SUBROUTINE pw_set_value_c3d_rs
1694! **************************************************************************************************
1695!> \brief Set values of a pw type to zero
1696!> \param pw ...
1697!> \par History
1698!> none
1699!> \author apsi
1700! **************************************************************************************************
1701 SUBROUTINE pw_zero_c3d_gs (pw)
1702
1703 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw
1704
1705 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1706
1707 INTEGER :: handle
1708
1709 CALL timeset(routinen, handle)
1710
1711#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
1712!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1713 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1714!$OMP END PARALLEL WORKSHARE
1715#else
1716 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1717#endif
1718
1719 CALL timestop(handle)
1720
1721 END SUBROUTINE pw_zero_c3d_gs
1722
1723! **************************************************************************************************
1724!> \brief multiplies pw coeffs with a number
1725!> \param pw ...
1726!> \param a ...
1727!> \par History
1728!> 11.2004 created [Joost VandeVondele]
1729! **************************************************************************************************
1730 SUBROUTINE pw_scale_c3d_gs (pw, a)
1731
1732 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw
1733 REAL(KIND=dp), INTENT(IN) :: a
1734
1735 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1736
1737 INTEGER :: handle
1738
1739 CALL timeset(routinen, handle)
1740
1741!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1742 pw%array = a*pw%array
1743!$OMP END PARALLEL WORKSHARE
1744
1745 CALL timestop(handle)
1746
1747 END SUBROUTINE pw_scale_c3d_gs
1748
1749! **************************************************************************************************
1750!> \brief writes a small description of the actual grid
1751!> (change to output the data as cube file, maybe with an
1752!> optional long_description arg?)
1753!> \param pw the pw data to output
1754!> \param unit_nr the unit to output to
1755!> \par History
1756!> 08.2002 created [fawzi]
1757!> \author Fawzi Mohamed
1758! **************************************************************************************************
   SUBROUTINE pw_write_c3d_gs (pw, unit_nr)

      ! Write a short, human-readable description of the grid data to unit_nr.
      TYPE(pw_c3d_gs_type), INTENT(in) :: pw
      INTEGER, INTENT(in) :: unit_nr

      INTEGER :: iostatus

      ! NOTE(review): iostatus is captured but never inspected, so write errors
      ! are silently ignored. Presumably best-effort debug output -- confirm.
      WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)

      WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c3d"
      IF (ASSOCIATED(pw%array)) THEN
         ! print the lower/upper bounds of each of the three dimensions
         WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
            lbound(pw%array, 1), ubound(pw%array, 1), lbound(pw%array, 2), ubound(pw%array, 2), &
            lbound(pw%array, 3), ubound(pw%array, 3)
      ELSE
         WRITE (unit=unit_nr, fmt="(' array=*null*')")
      END IF

   END SUBROUTINE pw_write_c3d_gs
1778
1779! **************************************************************************************************
1780!> \brief ...
1781!> \param fun ...
1782!> \param isign ...
1783!> \param oprt ...
1784!> \return ...
1785! **************************************************************************************************
   FUNCTION pw_integrate_function_c3d_gs (fun, isign, oprt) RESULT(total_fun)

      ! Integral of a function on a complex 3d reciprocal-space grid.
      ! NOTE(review): this variant is NOT implemented -- it unconditionally
      ! aborts below after validating its arguments.
      TYPE(pw_c3d_gs_type), INTENT(IN) :: fun
      INTEGER, INTENT(IN), OPTIONAL :: isign
      CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
      REAL(kind=dp) :: total_fun

      INTEGER :: iop

      iop = 0

      ! only the "ABS" operator is recognized; anything else aborts
      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! unconditional abort: 3d reciprocal-space integration is not implemented.
      ! The statements below are unreachable (cpabort does not return) and are
      ! kept for symmetry with the template-generated sibling routines.
      cpabort("Reciprocal space integration for 3D grids not implemented!")

      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c3d_gs
1821
1822! **************************************************************************************************
1823!> \brief ...
1824!> \param pw ...
1825!> \param value ...
1826! **************************************************************************************************
1827 SUBROUTINE pw_set_value_c3d_gs (pw, value)
1828 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
1829 REAL(KIND=dp), INTENT(IN) :: value
1830
1831 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1832
1833 INTEGER :: handle
1834
1835 CALL timeset(routinen, handle)
1836
1837!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1838 pw%array = cmplx(value, 0.0_dp, kind=dp)
1839!$OMP END PARALLEL WORKSHARE
1840
1841 CALL timestop(handle)
1842
1843 END SUBROUTINE pw_set_value_c3d_gs
1844
1845
1846! **************************************************************************************************
1847!> \brief copy a pw type variable
1848!> \param pw1 ...
1849!> \param pw2 ...
1850!> \par History
1851!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
1852!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
1853!> JGH (21-Feb-2003) : Code for generalized reference grids
1854!> \author apsi
1855!> \note
1856!> Currently only copying of respective types allowed,
1857!> in order to avoid errors
1858! **************************************************************************************************
   SUBROUTINE pw_copy_r1d_r1d_rs (pw1, pw2)

      ! Copy pw1 into pw2. Grids may differ: same-grid copies are a plain
      ! array copy; otherwise the gidx tables of reference-related grids are
      ! used to map coefficients between the two layouts.
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      ! NOTE(review): ns is computed below but never used (template-generated
      ! code; other type variants of this routine presumably use it).
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      ! both aborts together force pw1 and pw2 to be non-spherical here
      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! NOTE(review): given the abort above, this spherical branch appears
         ! unreachable in the real-space variant -- kept for symmetry with the
         ! template-generated g-space siblings; confirm before removing.
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ! copy the common prefix, zero-pad the rest of pw2
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = pw1%array(1:ng)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ns = 2*max(ng1, ng2)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2: pw2%gidx maps
               ! pw2 indices into pw1 (gather) or pw1 into pw2 (scatter),
               ! depending on which grid is larger
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(i,j) &
!$OMP             SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! target is larger: clear it first, then scatter pw1 into it
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(i,j) &
!$OMP             SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! mirror case: pw2 lives on the reference grid of pw1,
               ! so pw1%gidx provides the index mapping
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! grids are unrelated: no mapping available
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! identical grids: straight element-wise copy
         ! NOTE(review): PRIVATE(i) is redundant here -- i does not appear in
         ! the workshare block; confirm before cleaning up.
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r1d_r1d_rs
1951
1952! **************************************************************************************************
1953!> \brief ...
1954!> \param pw ...
1955!> \param array ...
1956! **************************************************************************************************
1957 SUBROUTINE pw_copy_to_array_r1d_r1d_rs (pw, array)
1958 TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
1959 REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
1960
1961 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
1962
1963 INTEGER :: handle
1964
1965 CALL timeset(routinen, handle)
1966
1967!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
1968 array(:) = pw%array(:)
1969!$OMP END PARALLEL WORKSHARE
1970
1971 CALL timestop(handle)
1972 END SUBROUTINE pw_copy_to_array_r1d_r1d_rs
1973
! **************************************************************************************************
!> \brief Fill a real-space pw field from a bare Fortran array.
!> \param pw     destination field; its %array (a pointer component, hence legal
!>               to modify through an INTENT(IN) dummy) is overwritten
!> \param array  source data (must conform to pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_r1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle, ip

      CALL timeset(routineN, handle)

      ! element-wise copy, parallelized over grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ip) SHARED(pw, array)
      DO ip = 1, SIZE(array)
         pw%array(ip) = array(ip)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_r1d_rs
1995
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 input field, read-only; scaled by alpha before accumulation
!> \param pw2 accumulator field, updated in place (first scaled by beta)
!> \param alpha optional scale factor for pw1 (default 1.0; 0.0 leaves pw2 = beta*pw2)
!> \param beta optional scale factor for pw2 (default 1.0; 0.0 zeroes pw2 explicitly)
!> \param allow_noncompatible_grids if .TRUE., allow grids that are distinct
!>        objects even when pw_compatible() does not hold
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_r1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! resolve optional arguments to their defaults
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! first scale the accumulator: pw2 <- beta*pw2
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            ! explicit zeroing instead of multiplying by 0
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! same grid object: plain element-wise axpy over the whole array
         ! (the alpha == 1 special case avoids the multiply)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical grids: entries are assumed ordered consistently, so the
            ! common prefix of length ng can be combined index by index
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2; pw2's gidx maps each of
            ! its local indices to the corresponding index on that reference grid
            IF (ng1 >= ng2) THEN
               ! pw2 is the smaller field: gather from pw1 (here ng == ng2)
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! pw1 is the smaller field: scatter into pw2 (here ng == ng1)
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! mirror case: pw2 lives on the reference grid of pw1, so pw1's
            ! gidx provides the index mapping
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_r1d_rs
2160
! **************************************************************************************************
!> \brief Accumulate an element-wise product: pw_out = pw_out + alpha * pw1 * pw2
!> \param pw_out accumulator field, updated in place
!> \param pw1 first factor (read-only)
!> \param pw2 second factor (read-only)
!> \param alpha optional prefactor, defaults to 1.0
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_r1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must share the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      ! OMP WORKSHARE path; the fac == 1 branch skips the extra multiply
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      ! serial fallback — presumably a workaround for pre-2025 Intel LLVM
      ! compilers and their WORKSHARE handling (TODO confirm)
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*pw2%array
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_r1d_rs
2211
! **************************************************************************************************
!> \brief In-place element-wise multiplication: pw1 = pw1 * pw2
!> \param pw1 field updated in place
!> \param pw2 multiplier field (read-only); must share pw1's grid object
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r1d_r1d_rs (pw1, pw2)
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle, ip

      CALL timeset(routineN, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

      ! element-wise product, parallelized over grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ip) SHARED(pw1, pw2)
      DO ip = 1, SIZE(pw1%array)
         pw1%array(ip) = pw1%array(ip)*pw2%array(ip)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r1d_r1d_rs
2237
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first field of the product (grid must be the same object as pw2's)
!> \param pw2 second field of the product
!> \param sumtype do_standard_sum for a plain dot product, otherwise the
!>        accurate (compensated) summation is used (default: do_accurate_sum)
!> \param just_sum if .TRUE., skip the volume-element scaling and return the raw sum
!> \param local_only if .TRUE., skip the MPI reduction over the grid's process group
!> \return the (real part of the) integral / sum
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_r1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_r1d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! resolve optional arguments to their defaults
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      ! the integral is only defined on one and the same grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = dot_product(pw1%array, pw2%array)

      ELSE

         ! Do accurate sum
         integral_value = accurate_dot_product(pw1%array, pw2%array)

      END IF

      ! convert sum over points to an integral via the volume element dvol
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! a halfspace grid stores only one of each +/-G pair: double the sum and
      ! remove the double-counted G=0 term if this rank holds it
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*pw2%array(1)
      END IF

      ! sum partial results across ranks for distributed grids
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_r1d_rs
! **************************************************************************************************
!> \brief copy a pw type variable
!> \param pw1 source field (read-only)
!> \param pw2 destination field, overwritten; entries of pw2 with no counterpart
!>        in pw1 are zeroed when the grids differ in size
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r1d_r1d_gs (pw1, pw2)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            ! both grids spherical: entries are ordered consistently, so
            ! copying the common prefix and zero-padding the rest suffices
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = pw1%array(1:ng)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! NOTE(review): ns is computed here but never used below
            ns = 2*max(ng1, ng2)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2; pw2's gidx maps each
               ! local index of pw2 to an index on that reference grid
               IF (ng1 >= ng2) THEN
                  ! gather: every entry of the smaller pw2 has a source in pw1
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(i,j) &
!$OMP             SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! scatter: zero pw2 first, then place the pw1 entries
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(i,j) &
!$OMP             SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! mirror case: pw2 lives on the reference grid of pw1, so pw1's
               ! gidx provides the index mapping
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! identical grid object: straight array copy
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r1d_r1d_gs
2412
! **************************************************************************************************
!> \brief Export the data of a reciprocal-space pw field into a bare Fortran array.
!> \param pw     source field; only its %array is read
!> \param array  destination buffer (must conform to pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r1d_r1d_gs (pw, array)
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle, ip

      CALL timeset(routineN, handle)

      ! element-wise copy, parallelized over grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ip) SHARED(pw, array)
      DO ip = 1, SIZE(array)
         array(ip) = pw%array(ip)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r1d_r1d_gs
2434
! **************************************************************************************************
!> \brief Fill a reciprocal-space pw field from a bare Fortran array.
!> \param pw     destination field; its %array (a pointer component, hence legal
!>               to modify through an INTENT(IN) dummy) is overwritten
!> \param array  source data (must conform to pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_r1d_gs (pw, array)
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle, ip

      CALL timeset(routineN, handle)

      ! element-wise copy, parallelized over grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ip) SHARED(pw, array)
      DO ip = 1, SIZE(array)
         pw%array(ip) = array(ip)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_r1d_gs
2456
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 input field, read-only; scaled by alpha before accumulation
!> \param pw2 accumulator field, updated in place (first scaled by beta)
!> \param alpha optional scale factor for pw1 (default 1.0; 0.0 leaves pw2 = beta*pw2)
!> \param beta optional scale factor for pw2 (default 1.0; 0.0 zeroes pw2 explicitly)
!> \param allow_noncompatible_grids if .TRUE., allow grids that are distinct
!>        objects even when pw_compatible() does not hold
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_r1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! resolve optional arguments to their defaults
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! first scale the accumulator: pw2 <- beta*pw2
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            ! explicit zeroing instead of multiplying by 0
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! same grid object: plain element-wise axpy over the whole array
         ! (the alpha == 1 special case avoids the multiply)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical grids: entries are assumed ordered consistently, so the
            ! common prefix of length ng can be combined index by index
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2; pw2's gidx maps each of
            ! its local indices to the corresponding index on that reference grid
            IF (ng1 >= ng2) THEN
               ! pw2 is the smaller field: gather from pw1 (here ng == ng2)
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! pw1 is the smaller field: scatter into pw2 (here ng == ng1)
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! mirror case: pw2 lives on the reference grid of pw1, so pw1's
            ! gidx provides the index mapping
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_r1d_gs
2621
! **************************************************************************************************
!> \brief Accumulate an element-wise product: pw_out = pw_out + alpha * pw1 * pw2
!> \param pw_out accumulator field, updated in place
!> \param pw1 first factor (read-only)
!> \param pw2 second factor (read-only)
!> \param alpha optional prefactor, defaults to 1.0
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_r1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must share the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      ! OMP WORKSHARE path; the fac == 1 branch skips the extra multiply
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      ! serial fallback — presumably a workaround for pre-2025 Intel LLVM
      ! compilers and their WORKSHARE handling (TODO confirm)
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*pw2%array
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_r1d_gs
2672
! **************************************************************************************************
!> \brief In-place element-wise multiplication: pw1 = pw1 * pw2
!> \param pw1 field updated in place
!> \param pw2 multiplier field (read-only); must share pw1's grid object
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r1d_r1d_gs (pw1, pw2)
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle, ip

      CALL timeset(routineN, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

      ! element-wise product, parallelized over grid points
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ip) SHARED(pw1, pw2)
      DO ip = 1, SIZE(pw1%array)
         pw1%array(ip) = pw1%array(ip)*pw2%array(ip)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r1d_r1d_gs
2698
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first field of the product (grid must be the same object as pw2's)
!> \param pw2 second field of the product
!> \param sumtype do_standard_sum for a plain dot product, otherwise the
!>        accurate (compensated) summation is used (default: do_accurate_sum)
!> \param just_sum if .TRUE., skip the volume scaling and return the raw sum
!> \param local_only if .TRUE., skip the MPI reduction over the grid's process group
!> \return the (real part of the) integral / sum
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_r1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_r1d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! resolve optional arguments to their defaults
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      ! the integral is only defined on one and the same grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = dot_product(pw1%array, pw2%array)

      ELSE

         ! Do accurate sum
         integral_value = accurate_dot_product(pw1%array, pw2%array)

      END IF

      ! reciprocal-space variant: scale by the cell volume (vol), in contrast
      ! to the real-space variant which scales by the volume element dvol
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! a halfspace grid stores only one of each +/-G pair: double the sum and
      ! remove the double-counted G=0 term if this rank holds it
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*pw2%array(1)
      END IF

      ! sum partial results across ranks for distributed grids
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_r1d_gs
2770
! **************************************************************************************************
!> \brief G^2-weighted integral of the product of two reciprocal-space fields:
!>        vol * sum_G pw1(G)*pw2(G)*|G|^2, reduced over all ranks if distributed.
!> \param pw1 first field (grid must be the same object as pw2's)
!> \param pw2 second field
!> \return the weighted integral
! **************************************************************************************************
   FUNCTION pw_integral_a2b_r1d_r1d (pw1, pw2) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! compensated summation of the G^2-weighted product
      integral_value = accurate_sum(pw1%array*pw2%array*pw1%pw_grid%gsq)
      ! a halfspace grid stores only one of each +/-G pair -> double the sum
      IF (pw1%pw_grid%grid_span == halfspace) integral_value = 2.0_dp*integral_value

      integral_value = integral_value*pw1%pw_grid%vol

      ! reduce partial sums for distributed grids
      IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)
      CALL timestop(handle)

   END FUNCTION pw_integral_a2b_r1d_r1d
! **************************************************************************************************
!> \brief copy a pw type variable (real field into complex field, imaginary
!>        parts are set to zero)
!> \param pw1 source field (real data, read-only)
!> \param pw2 destination field (complex data), overwritten
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r1d_c1d_rs (pw1, pw2)

      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      ! real-space variant: spherical grids are rejected outright
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! NOTE(review): this spherical branch is unreachable — the abort
         ! above already fires for any spherical grid (kept for template
         ! symmetry with the reciprocal-space variants)
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = cmplx(pw1%array(1:ng), 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! NOTE(review): ns is computed here but never used below
            ns = 2*max(ng1, ng2)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2; pw2's gidx maps each
               ! local index of pw2 to an index on that reference grid
               IF (ng1 >= ng2) THEN
                  ! gather: every entry of the smaller pw2 has a source in pw1
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(i,j) &
!$OMP             SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! scatter: zero pw2 first, then place the pw1 entries
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP             PRIVATE(i,j) &
!$OMP             SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! mirror case: pw2 lives on the reference grid of pw1, so pw1's
               ! gidx provides the index mapping
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! identical grid object: promote real data to complex, zero imaginary part
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r1d_c1d_rs
2910
! **************************************************************************************************
!> \brief Export a real-space pw field into a complex array (imaginary parts zero).
!> \param pw     source field; only its %array is read
!> \param array  destination complex buffer (must conform to pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r1d_c1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle, ip

      CALL timeset(routineN, handle)

      ! promote each real grid point to a complex value with zero imaginary part
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ip) SHARED(pw, array)
      DO ip = 1, SIZE(array)
         array(ip) = cmplx(pw%array(ip), 0.0_dp, kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r1d_c1d_rs
2932
! **************************************************************************************************
!> \brief Fill a real-space pw field from a complex array, keeping only the
!>        real parts (imaginary parts are discarded).
!> \param pw     destination field; its %array (a pointer component, hence legal
!>               to modify through an INTENT(IN) dummy) is overwritten
!> \param array  source complex data (must conform to pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_c1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle, ip

      CALL timeset(routineN, handle)

      ! take the real part of each complex entry
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ip) SHARED(pw, array)
      DO ip = 1, SIZE(pw%array)
         pw%array(ip) = real(array(ip), kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_c1d_rs
2954
2955! **************************************************************************************************
2956!> \brief pw2 = alpha*pw1 + beta*pw2
2957!> alpha defaults to 1, beta defaults to 1
2958!> \param pw1 ...
2959!> \param pw2 ...
2960!> \param alpha ...
2961!> \param beta ...
2962!> \param allow_noncompatible_grids ...
2963!> \par History
2964!> JGH (21-Feb-2003) : added reference grid functionality
2965!> JGH (01-Dec-2007) : rename and remove complex alpha
2966!> \author apsi
2967!> \note
2968!> Currently only summing up of respective types allowed,
2969!> in order to avoid errors
2970! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_c1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! Defaults: pw2 = 1*pw1 + 1*pw2 unless alpha/beta are supplied.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! First scale pw2 in place: beta == 0 zeroes explicitly (avoids touching
      ! possibly uninitialized data), beta == 1 skips the pass entirely.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Same grid object: element-wise axpy over the full arrays.
         ! alpha == 1 avoids the multiplication; alpha == 0 is a no-op.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different but compatible grids: only the overlapping ng = min(ng1, ng2)
         ! coefficients are combined.
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids: identical ordering, direct index mapping.
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: translate indices through
            ! pw2's gather/scatter table gidx.
            IF (ng1 >= ng2) THEN
               ! pw2 is the smaller grid: gather from pw1.
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! pw1 is the smaller grid: scatter into pw2.
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw2 lives on the reference grid of pw1, so pw1's
            ! gidx table provides the index translation.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_c1d_rs
3119
3120! **************************************************************************************************
3121!> \brief pw_out = pw_out + alpha * pw1 * pw2
3122!> alpha defaults to 1
3123!> \param pw_out ...
3124!> \param pw1 ...
3125!> \param pw2 ...
3126!> \param alpha ...
3127!> \author JGH
3128! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_c1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! All three fields must share the very same grid object; the element-wise
      ! product below assumes identical ordering and size.
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! NOTE(review): the #else branch drops the OpenMP WORKSHARE for older Intel
      ! LLVM compilers — presumably a workaround for a compiler defect; confirm
      ! before removing the guard. Only the real part of pw2 enters the product.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_c1d_rs
3170
3171! **************************************************************************************************
3172!> \brief ...
3173!> \param pw1 ...
3174!> \param pw2 ...
3175! **************************************************************************************************
3176 SUBROUTINE pw_multiply_with_r1d_c1d_rs (pw1, pw2)
3177 TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw1
3178 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
3179
3180 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
3181
3182 INTEGER :: handle
3183
3184 CALL timeset(routinen, handle)
3185
3186 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
3187 cpabort("Incompatible grids!")
3188
3189!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3190 pw1%array = pw1%array* real(pw2%array, kind=dp)
3191!$OMP END PARALLEL WORKSHARE
3192
3193 CALL timestop(handle)
3194
3195 END SUBROUTINE pw_multiply_with_r1d_c1d_rs
3196
3197! **************************************************************************************************
3198!> \brief Calculate integral over unit cell for functions in plane wave basis
3199!> only returns the real part of it ......
3200!> \param pw1 ...
3201!> \param pw2 ...
3202!> \param sumtype ...
3203!> \param just_sum ...
3204!> \param local_only ...
3205!> \return ...
3206!> \par History
3207!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
3208!> \author apsi
3209! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_c1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_c1d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Default to the compensated (accurate) summation.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! Only the real part of pw2 contributes to the (real-valued) integral.
      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      END IF

      ! Unless just_sum was requested, weight by the volume element to turn the
      ! plain sum into an integral over the unit cell.
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! Half-space grids store only half of the +g/-g pairs: double the sum and
      ! undo the double counting of the g=0 term if this rank holds it.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*real(pw2%array(1), kind=dp)
      END IF

      ! Reduce the partial sums over all ranks of a distributed grid, unless the
      ! caller explicitly asked for the local contribution only.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_c1d_rs
3268! **************************************************************************************************
3269!> \brief copy a pw type variable
3270!> \param pw1 ...
3271!> \param pw2 ...
3272!> \par History
3273!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
3274!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
3275!> JGH (21-Feb-2003) : Code for generalized reference grids
3276!> \author apsi
3277!> \note
3278!> Currently only copying of respective types allowed,
3279!> in order to avoid errors
3280! **************************************************************************************************
3281 SUBROUTINE pw_copy_r1d_c1d_gs (pw1, pw2)
3282
3283 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
3284 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
3285
3286 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
3287
3288 INTEGER :: handle
3289 INTEGER :: i, j, ng, ng1, ng2, ns
3290
3291 CALL timeset(routinen, handle)
3292
3293 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
3294 cpabort("Both grids must be either spherical or non-spherical!")
3295
3296 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
3297 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
3298 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
3299 ng1 = SIZE(pw1%array)
3300 ng2 = SIZE(pw2%array)
3301 ng = min(ng1, ng2)
3302!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
3303 pw2%array(1:ng) = cmplx(pw1%array(1:ng), 0.0_dp, kind=dp)
3304!$OMP END PARALLEL WORKSHARE
3305 IF (ng2 > ng) THEN
3306!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
3307 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
3308!$OMP END PARALLEL WORKSHARE
3309 END IF
3310 ELSE
3311 cpabort("Copies between spherical grids require compatible grids!")
3312 END IF
3313 ELSE
3314 ng1 = SIZE(pw1%array)
3315 ng2 = SIZE(pw2%array)
3316 ns = 2*max(ng1, ng2)
3317
3318 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
3319 IF (ng1 >= ng2) THEN
3320!$OMP PARALLEL DO DEFAULT(NONE) &
3321!$OMP PRIVATE(i,j) &
3322!$OMP SHARED(ng2, pw1, pw2)
3323 DO i = 1, ng2
3324 j = pw2%pw_grid%gidx(i)
3325 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
3326 END DO
3327!$OMP END PARALLEL DO
3328 ELSE
3329 CALL pw_zero(pw2)
3330!$OMP PARALLEL DO DEFAULT(NONE) &
3331!$OMP PRIVATE(i,j) &
3332!$OMP SHARED(ng1, pw1, pw2)
3333 DO i = 1, ng1
3334 j = pw2%pw_grid%gidx(i)
3335 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
3336 END DO
3337!$OMP END PARALLEL DO
3338 END IF
3339 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
3340 IF (ng1 >= ng2) THEN
3341!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
3342 DO i = 1, ng2
3343 j = pw1%pw_grid%gidx(i)
3344 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
3345 END DO
3346!$OMP END PARALLEL DO
3347 ELSE
3348 CALL pw_zero(pw2)
3349!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
3350 DO i = 1, ng1
3351 j = pw1%pw_grid%gidx(i)
3352 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
3353 END DO
3354!$OMP END PARALLEL DO
3355 END IF
3356 ELSE
3357 cpabort("Copy not implemented!")
3358 END IF
3359
3360 END IF
3361
3362 ELSE
3363!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
3364 pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
3365!$OMP END PARALLEL WORKSHARE
3366 END IF
3367
3368 CALL timestop(handle)
3369
3370 END SUBROUTINE pw_copy_r1d_c1d_gs
3371
3372! **************************************************************************************************
3373!> \brief ...
3374!> \param pw ...
3375!> \param array ...
3376! **************************************************************************************************
3377 SUBROUTINE pw_copy_to_array_r1d_c1d_gs (pw, array)
3378 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
3379 COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
3380
3381 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
3382
3383 INTEGER :: handle
3384
3385 CALL timeset(routinen, handle)
3386
3387!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3388 array(:) = cmplx(pw%array(:), 0.0_dp, kind=dp)
3389!$OMP END PARALLEL WORKSHARE
3390
3391 CALL timestop(handle)
3392 END SUBROUTINE pw_copy_to_array_r1d_c1d_gs
3393
3394! **************************************************************************************************
3395!> \brief ...
3396!> \param pw ...
3397!> \param array ...
3398! **************************************************************************************************
3399 SUBROUTINE pw_copy_from_array_r1d_c1d_gs (pw, array)
3400 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
3401 COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array
3402
3403 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
3404
3405 INTEGER :: handle
3406
3407 CALL timeset(routinen, handle)
3408
3409!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3410 pw%array = real(array, kind=dp)
3411!$OMP END PARALLEL WORKSHARE
3412
3413 CALL timestop(handle)
3414 END SUBROUTINE pw_copy_from_array_r1d_c1d_gs
3415
3416! **************************************************************************************************
3417!> \brief pw2 = alpha*pw1 + beta*pw2
3418!> alpha defaults to 1, beta defaults to 1
3419!> \param pw1 ...
3420!> \param pw2 ...
3421!> \param alpha ...
3422!> \param beta ...
3423!> \param allow_noncompatible_grids ...
3424!> \par History
3425!> JGH (21-Feb-2003) : added reference grid functionality
3426!> JGH (01-Dec-2007) : rename and remove complex alpha
3427!> \author apsi
3428!> \note
3429!> Currently only summing up of respective types allowed,
3430!> in order to avoid errors
3431! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_c1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! Defaults: pw2 = 1*pw1 + 1*pw2 unless alpha/beta are supplied.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! First scale pw2 in place: beta == 0 zeroes explicitly, beta == 1 skips
      ! the pass entirely.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Same grid object: element-wise axpy over the full arrays.
         ! alpha == 1 avoids the multiplication; alpha == 0 is a no-op.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different but compatible grids: only the overlapping ng = min(ng1, ng2)
         ! coefficients are combined.
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids: identical ordering, direct index mapping.
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: translate indices through
            ! pw2's gather/scatter table gidx.
            IF (ng1 >= ng2) THEN
               ! pw2 is the smaller grid: gather from pw1.
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! pw1 is the smaller grid: scatter into pw2.
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw2 lives on the reference grid of pw1, so pw1's
            ! gidx table provides the index translation.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_c1d_gs
3580
3581! **************************************************************************************************
3582!> \brief pw_out = pw_out + alpha * pw1 * pw2
3583!> alpha defaults to 1
3584!> \param pw_out ...
3585!> \param pw1 ...
3586!> \param pw2 ...
3587!> \param alpha ...
3588!> \author JGH
3589! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_c1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! All three fields must share the very same grid object; the element-wise
      ! product below assumes identical ordering and size.
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! NOTE(review): the #else branch drops the OpenMP WORKSHARE for older Intel
      ! LLVM compilers — presumably a workaround for a compiler defect; confirm
      ! before removing the guard. Only the real part of pw2 enters the product.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_c1d_gs
3631
3632! **************************************************************************************************
3633!> \brief ...
3634!> \param pw1 ...
3635!> \param pw2 ...
3636! **************************************************************************************************
3637 SUBROUTINE pw_multiply_with_r1d_c1d_gs (pw1, pw2)
3638 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw1
3639 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
3640
3641 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
3642
3643 INTEGER :: handle
3644
3645 CALL timeset(routinen, handle)
3646
3647 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
3648 cpabort("Incompatible grids!")
3649
3650!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3651 pw1%array = pw1%array* real(pw2%array, kind=dp)
3652!$OMP END PARALLEL WORKSHARE
3653
3654 CALL timestop(handle)
3655
3656 END SUBROUTINE pw_multiply_with_r1d_c1d_gs
3657
3658! **************************************************************************************************
3659!> \brief Calculate integral over unit cell for functions in plane wave basis
3660!> only returns the real part of it ......
3661!> \param pw1 ...
3662!> \param pw2 ...
3663!> \param sumtype ...
3664!> \param just_sum ...
3665!> \param local_only ...
3666!> \return ...
3667!> \par History
3668!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
3669!> \author apsi
3670! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_c1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r1d_c1d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Default to the compensated (accurate) summation.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! Only the real part of pw2 contributes to the (real-valued) integral.
      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      END IF

      ! Unless just_sum was requested, weight by the cell volume (g-space
      ! normalization; the real-space variant uses dvol instead).
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! Half-space grids store only half of the +g/-g pairs: double the sum and
      ! undo the double counting of the g=0 term if this rank holds it.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*real(pw2%array(1), kind=dp)
      END IF

      ! Reduce the partial sums over all ranks of a distributed grid, unless the
      ! caller explicitly asked for the local contribution only.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_c1d_gs
3729
3730! **************************************************************************************************
3731!> \brief ...
3732!> \param pw1 ...
3733!> \param pw2 ...
3734!> \return ...
3735! **************************************************************************************************
   FUNCTION pw_integral_a2b_r1d_c1d (pw1, pw2) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! g^2-weighted overlap: sum_g pw1(g) * Re(pw2(g)) * |g|^2, using the
      ! compensated summation for accuracy.
      integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)*pw1%pw_grid%gsq)
      ! Half-space grids store only half of the +g/-g pairs; the g=0 term does
      ! not need correcting here because its gsq weight is zero.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
      END IF

      integral_value = integral_value*pw1%pw_grid%vol

      ! Reduce partial sums over all ranks of a distributed grid.
      IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)
      CALL timestop(handle)

   END FUNCTION pw_integral_a2b_r1d_c1d
3764! **************************************************************************************************
3765!> \brief copy a pw type variable
3766!> \param pw1 ...
3767!> \param pw2 ...
3768!> \par History
3769!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
3770!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
3771!> JGH (21-Feb-2003) : Code for generalized reference grids
3772!> \author apsi
3773!> \note
3774!> Currently only copying of respective types allowed,
3775!> in order to avoid errors
3776! **************************************************************************************************
3777 SUBROUTINE pw_copy_r3d_r3d_rs (pw1, pw2)
3778
3779 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
3780 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
3781
3782 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
3783
3784 INTEGER :: handle
3785
3786 CALL timeset(routinen, handle)
3787
3788 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
3789 cpabort("Both grids must be either spherical or non-spherical!")
3790 IF (pw1%pw_grid%spherical) &
3791 cpabort("Spherical grids only exist in reciprocal space!")
3792
3793 IF (any(shape(pw2%array) /= shape(pw1%array))) &
3794 cpabort("3D grids must be compatible!")
3795 IF (pw1%pw_grid%spherical) &
3796 cpabort("3D grids must not be spherical!")
3797!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3798 pw2%array(:, :, :) = pw1%array(:, :, :)
3799!$OMP END PARALLEL WORKSHARE
3800
3801 CALL timestop(handle)
3802
3803 END SUBROUTINE pw_copy_r3d_r3d_rs
3804
3805! **************************************************************************************************
3806!> \brief ...
3807!> \param pw ...
3808!> \param array ...
3809! **************************************************************************************************
3810 SUBROUTINE pw_copy_to_array_r3d_r3d_rs (pw, array)
3811 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
3812 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
3813
3814 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
3815
3816 INTEGER :: handle
3817
3818 CALL timeset(routinen, handle)
3819
3820!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3821 array(:, :, :) = pw%array(:, :, :)
3822!$OMP END PARALLEL WORKSHARE
3823
3824 CALL timestop(handle)
3825 END SUBROUTINE pw_copy_to_array_r3d_r3d_rs
3826
3827! **************************************************************************************************
3828!> \brief ...
3829!> \param pw ...
3830!> \param array ...
3831! **************************************************************************************************
3832 SUBROUTINE pw_copy_from_array_r3d_r3d_rs (pw, array)
3833 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
3834 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
3835
3836 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
3837
3838 INTEGER :: handle
3839
3840 CALL timeset(routinen, handle)
3841
3842!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3843 pw%array = array
3844!$OMP END PARALLEL WORKSHARE
3845
3846 CALL timestop(handle)
3847 END SUBROUTINE pw_copy_from_array_r3d_r3d_rs
3848
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 input field (unchanged)
!> \param pw2 accumulator field, updated in place
!> \param alpha optional scaling of pw1 (default 1); alpha == 0 skips the update
!> \param beta optional scaling of pw2 before accumulation (default 1)
!> \param allow_noncompatible_grids permit distinct grid objects of equal shape
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_r3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routineN, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .FALSE.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! first scale (or clear) the accumulator
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         IF (ANY(SHAPE(pw1%array) /= SHAPE(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
            ! fix: skip the update when alpha == 0 (previously 0*pw1 was still added,
            ! wasting a full sweep and propagating Inf/NaN from pw1); this now matches
            ! the same-grid branch above
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r3d_r3d_rs
3934
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator field, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha optional scaling of the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_r3d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: timer
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, timer)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must live on one and the same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! WORKSHARE is bypassed for Intel LLVM compilers older than 2025.0 (see guard)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*pw2%array
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
      END IF
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_r3d_r3d_rs
3985
! **************************************************************************************************
!> \brief In-place elementwise multiplication: pw1 = pw1 * pw2
!> \param pw1 field updated in place
!> \param pw2 multiplier field; must share the grid object of pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_r3d_rs (pw1, pw2)
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! only identical grid objects are supported
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_r3d_r3d_rs
4011
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first field
!> \param pw2 second field; must share the grid object of pw1
!> \param sumtype do_standard_sum or do_accurate_sum (default)
!> \param just_sum if true, return the bare grid sum without the volume factor
!> \param local_only if true, skip the reduction over MPI ranks
!> \return value of the integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_r3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(KIND=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_integral_ab_r3d_r3d_rs'

      INTEGER :: sum_method, timer
      LOGICAL :: bare_sum, skip_mpi_sum

      CALL timeset(routineN, timer)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      skip_mpi_sum = .FALSE.
      IF (PRESENT(local_only)) skip_mpi_sum = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      bare_sum = .FALSE.
      IF (PRESENT(just_sum)) bare_sum = just_sum

      IF (sum_method /= do_standard_sum) THEN
         ! compensated summation for reduced round-off (the default)
         integral_value = accurate_dot_product(pw1%array, pw2%array)
      ELSE
         ! plain intrinsic sum
         integral_value = SUM(pw1%array*pw2%array)
      END IF

      ! convert the grid sum into an integral over the cell
      IF (.NOT. bare_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. skip_mpi_sum .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(timer)

   END FUNCTION pw_integral_ab_r3d_r3d_rs
! **************************************************************************************************
!> \brief copy a pw type variable
!> \param pw1 source field
!> \param pw2 destination field; its grid must match pw1 in shape
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r3d_r3d_gs (pw1, pw2)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! sanity checks: equal sphericity, equal shape, and no spherical 3d grids
      IF (pw1%pw_grid%spherical .NEQV. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (ANY(SHAPE(pw2%array) /= SHAPE(pw1%array))) &
         cpabort("3D grids must be compatible!")
      IF (pw1%pw_grid%spherical) &
         cpabort("3D grids must not be spherical!")
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_copy_r3d_r3d_gs
4116
! **************************************************************************************************
!> \brief Export the data of a g-space real 3d pw field into a bare array.
!> \param pw source field; its data is left untouched
!> \param array destination array (caller guarantees a matching shape)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r3d_r3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! whole-array copy, distributed over the OpenMP team
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array = pw%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_r3d_r3d_gs
4138
! **************************************************************************************************
!> \brief Load the data of a g-space real 3d pw field from a bare array.
!> \param pw destination field (its data buffer is overwritten)
!> \param array source array (caller guarantees a matching shape)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r3d_r3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! whole-array copy, distributed over the OpenMP team
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array(:, :, :) = array(:, :, :)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_r3d_r3d_gs
4160
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 input field (unchanged)
!> \param pw2 accumulator field, updated in place
!> \param alpha optional scaling of pw1 (default 1); alpha == 0 skips the update
!> \param beta optional scaling of pw2 before accumulation (default 1)
!> \param allow_noncompatible_grids permit distinct grid objects of equal shape
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_r3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routineN, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .FALSE.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! first scale (or clear) the accumulator
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         IF (ANY(SHAPE(pw1%array) /= SHAPE(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
            ! fix: skip the update when alpha == 0 (previously 0*pw1 was still added,
            ! wasting a full sweep and propagating Inf/NaN from pw1); this now matches
            ! the same-grid branch above
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha*pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r3d_r3d_gs
4246
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator field, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha optional scaling of the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_r3d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: timer
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, timer)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must live on one and the same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! WORKSHARE is bypassed for Intel LLVM compilers older than 2025.0 (see guard)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*pw2%array
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
      END IF
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_r3d_r3d_gs
4297
! **************************************************************************************************
!> \brief In-place elementwise multiplication: pw1 = pw1 * pw2
!> \param pw1 field updated in place
!> \param pw2 multiplier field; must share the grid object of pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_r3d_gs (pw1, pw2)
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! only identical grid objects are supported
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_r3d_r3d_gs
4323
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first field
!> \param pw2 second field; must share the grid object of pw1
!> \param sumtype do_standard_sum or do_accurate_sum (default)
!> \param just_sum if true, return the bare grid sum without the volume factor
!> \param local_only if true, skip the reduction over MPI ranks
!> \return value of the integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_r3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(KIND=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_integral_ab_r3d_r3d_gs'

      INTEGER :: sum_method, timer
      LOGICAL :: bare_sum, skip_mpi_sum

      CALL timeset(routineN, timer)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      skip_mpi_sum = .FALSE.
      IF (PRESENT(local_only)) skip_mpi_sum = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      bare_sum = .FALSE.
      IF (PRESENT(just_sum)) bare_sum = just_sum

      IF (sum_method /= do_standard_sum) THEN
         ! compensated summation for reduced round-off (the default)
         integral_value = accurate_dot_product(pw1%array, pw2%array)
      ELSE
         ! plain intrinsic sum
         integral_value = SUM(pw1%array*pw2%array)
      END IF

      ! NOTE(review): the g-space variant scales with %vol whereas the real-space
      ! variant uses %dvol -- presumably the Parseval convention for reciprocal-space
      ! data; confirm before changing
      IF (.NOT. bare_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. skip_mpi_sum .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(timer)

   END FUNCTION pw_integral_ab_r3d_r3d_gs
4390
! **************************************************************************************************
!> \brief copy a pw type variable (real source into complex destination,
!>        imaginary part set to zero)
!> \param pw1 source field (real data)
!> \param pw2 destination field (complex data); grid must match pw1 in shape
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r3d_c3d_rs (pw1, pw2)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .NEQV. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (ANY(SHAPE(pw2%array) /= SHAPE(pw1%array))) &
         cpabort("3D grids must be compatible!")
      ! fix: a second spherical check ("3D grids must not be spherical!") was removed
      ! here; it was unreachable dead code, the spherical abort above already fires
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array(:, :, :) = CMPLX(pw1%array(:, :, :), 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r3d_c3d_rs
4431
! **************************************************************************************************
!> \brief Export a real-space real 3d pw field into a complex array
!>        (imaginary part set to zero).
!> \param pw source field; its data is left untouched
!> \param array destination complex array (caller guarantees a matching shape)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r3d_c3d_rs (pw, array)
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! promote the real data to complex, distributed over the OpenMP team
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array = CMPLX(pw%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_r3d_c3d_rs
4453
! **************************************************************************************************
!> \brief Load a real-space real 3d pw field from a complex array;
!>        only the real part is kept.
!> \param pw destination field (its data buffer is overwritten)
!> \param array source complex array (caller guarantees a matching shape)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r3d_c3d_rs (pw, array)
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! the imaginary part of array is discarded here
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array = REAL(array, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_r3d_c3d_rs
4475
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2 (real pw1 promoted to complex)
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 input field (real data, unchanged)
!> \param pw2 accumulator field (complex data), updated in place
!> \param alpha optional scaling of pw1 (default 1); alpha == 0 skips the update
!> \param beta optional scaling of pw2 before accumulation (default 1)
!> \param allow_noncompatible_grids permit distinct grid objects of equal shape
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_c3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routineN, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .FALSE.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! first scale (or clear) the accumulator
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + CMPLX(pw1%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha*CMPLX(pw1%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         IF (ANY(SHAPE(pw1%array) /= SHAPE(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + CMPLX(pw1%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
            ! fix: skip the update when alpha == 0 (previously 0*pw1 was still added,
            ! wasting a full sweep and propagating Inf/NaN from pw1); this now matches
            ! the same-grid branch above
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha*CMPLX(pw1%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r3d_c3d_rs
4561
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1; only the real part of pw2 enters the product
!> \param pw_out accumulator field (real), updated in place
!> \param pw1 first factor (real)
!> \param pw2 second factor (complex; imaginary part ignored)
!> \param alpha optional scaling of the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_c3d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: timer
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, timer)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! all three fields must live on one and the same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! WORKSHARE is bypassed for Intel LLVM compilers older than 2025.0 (see guard)
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*REAL(pw2%array, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*REAL(pw2%array, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (fac == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array*REAL(pw2%array, KIND=dp)
      ELSE
         pw_out%array = pw_out%array + fac*pw1%array*REAL(pw2%array, KIND=dp)
      END IF
#endif

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_r3d_c3d_rs
4612
! **************************************************************************************************
!> \brief In-place elementwise multiplication: pw1 = pw1 * Re(pw2)
!> \param pw1 field updated in place (real)
!> \param pw2 multiplier field (complex; imaginary part ignored);
!>        must share the grid object of pw1
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_c3d_rs (pw1, pw2)
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! only identical grid objects are supported
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*REAL(pw2%array, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_r3d_c3d_rs
4638
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first field (real)
!> \param pw2 second field (complex); must share the grid object of pw1
!> \param sumtype do_standard_sum or do_accurate_sum (default)
!> \param just_sum if true, return the bare grid sum without the volume factor
!> \param local_only if true, skip the reduction over MPI ranks
!> \return value of the integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_c3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(KIND=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_integral_ab_r3d_c3d_rs'

      INTEGER :: sum_method, timer
      LOGICAL :: bare_sum, skip_mpi_sum

      CALL timeset(routineN, timer)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      skip_mpi_sum = .FALSE.
      IF (PRESENT(local_only)) skip_mpi_sum = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      bare_sum = .FALSE.
      IF (PRESENT(just_sum)) bare_sum = just_sum

      ! NOTE(review): only Re(pw2) enters the sum, the imaginary contribution is
      ! dropped (marked "!? complex bit" in the original) -- confirm this is intended
      IF (sum_method /= do_standard_sum) THEN
         ! compensated summation for reduced round-off (the default)
         integral_value = accurate_sum(pw1%array*REAL(pw2%array, KIND=dp))
      ELSE
         ! plain intrinsic sum
         integral_value = SUM(pw1%array*REAL(pw2%array, KIND=dp))
      END IF

      ! convert the grid sum into an integral over the cell
      IF (.NOT. bare_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! reduce over all ranks holding parts of a distributed grid
      IF (.NOT. skip_mpi_sum .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(timer)

   END FUNCTION pw_integral_ab_r3d_c3d_rs
! **************************************************************************************************
!> \brief copy a pw type variable (real source into complex destination,
!>        imaginary part set to zero)
!> \param pw1 source field (real data)
!> \param pw2 destination field (complex data); grid must match pw1 in shape
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r3d_c3d_gs (pw1, pw2)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! sanity checks: equal sphericity, equal shape, and no spherical 3d grids
      IF (pw1%pw_grid%spherical .NEQV. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (ANY(SHAPE(pw2%array) /= SHAPE(pw1%array))) &
         cpabort("3D grids must be compatible!")
      IF (pw1%pw_grid%spherical) &
         cpabort("3D grids must not be spherical!")
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array = CMPLX(pw1%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_copy_r3d_c3d_gs
4743
! **************************************************************************************************
!> \brief Export a g-space real 3d pw field into a complex array
!>        (imaginary part set to zero).
!> \param pw source field; its data is left untouched
!> \param array destination complex array (caller guarantees a matching shape)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r3d_c3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! promote the real data to complex, distributed over the OpenMP team
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      array = CMPLX(pw%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_r3d_c3d_gs
4765
! **************************************************************************************************
!> \brief Load a g-space real 3d pw field from a complex array;
!>        only the real part is kept.
!> \param pw destination field (its data buffer is overwritten)
!> \param array source complex array (caller guarantees a matching shape)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r3d_c3d_gs (pw, array)
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! the imaginary part of array is discarded here
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
      pw%array = REAL(array, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_r3d_c3d_gs
4787
4788! **************************************************************************************************
4789!> \brief pw2 = alpha*pw1 + beta*pw2
4790!> alpha defaults to 1, beta defaults to 1
4791!> \param pw1 ...
4792!> \param pw2 ...
4793!> \param alpha ...
4794!> \param beta ...
4795!> \param allow_noncompatible_grids ...
4796!> \par History
4797!> JGH (21-Feb-2003) : added reference grid functionality
4798!> JGH (01-Dec-2007) : rename and remove complex alpha
4799!> \author apsi
4800!> \note
4801!> Currently only summing up of respective types allowed,
4802!> in order to avoid errors
4803! **************************************************************************************************
4804 SUBROUTINE pw_axpy_r3d_c3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
4805
4806 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4807 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
4808 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
4809 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
4810
4811 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
4812
4813 INTEGER :: handle
4814 LOGICAL :: my_allow_noncompatible_grids
4815 REAL(KIND=dp) :: my_alpha, my_beta
4816
4817 CALL timeset(routinen, handle)
4818
4819 my_alpha = 1.0_dp
4820 IF (PRESENT(alpha)) my_alpha = alpha
4821
4822 my_beta = 1.0_dp
4823 IF (PRESENT(beta)) my_beta = beta
4824
4825 my_allow_noncompatible_grids = .false.
4826 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
4827
4828 IF (my_beta /= 1.0_dp) THEN
4829 IF (my_beta == 0.0_dp) THEN
4830 CALL pw_zero(pw2)
4831 ELSE
4832!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
4833 pw2%array = pw2%array*my_beta
4834!$OMP END PARALLEL WORKSHARE
4835 END IF
4836 END IF
4837
4838 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
4839 IF (my_alpha == 1.0_dp) THEN
4840!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
4841 pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
4842!$OMP END PARALLEL WORKSHARE
4843 ELSE IF (my_alpha /= 0.0_dp) THEN
4844!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
4845 pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
4846!$OMP END PARALLEL WORKSHARE
4847 END IF
4848
4849 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
4850
4851 IF (any(shape(pw1%array) /= shape(pw2%array))) &
4852 cpabort("Noncommensurate grids not implemented for 3D grids!")
4853
4854 IF (my_alpha == 1.0_dp) THEN
4855!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4856 pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
4857!$OMP END PARALLEL WORKSHARE
4858 ELSE
4859!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
4860 pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
4861!$OMP END PARALLEL WORKSHARE
4862 END IF
4863
4864 ELSE
4865
4866 cpabort("Grids not compatible")
4867
4868 END IF
4869
4870 CALL timestop(handle)
4871
4872 END SUBROUTINE pw_axpy_r3d_c3d_gs
4873
4874! **************************************************************************************************
4875!> \brief pw_out = pw_out + alpha * pw1 * pw2
4876!> alpha defaults to 1
4877!> \param pw_out ...
4878!> \param pw1 ...
4879!> \param pw2 ...
4880!> \param alpha ...
4881!> \author JGH
4882! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_c3d_gs (pw_out, pw1, pw2, alpha)
      ! pw_out <- pw_out + alpha*pw1*pw2; alpha defaults to 1.
      ! Only the real part of pw2 enters the product.

      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! all three fields must live on the same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

! Older Intel LLVM compilers (< 2025.0) miscompile/fail on this WORKSHARE;
! fall back to the serial array statement for them.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r3d_c3d_gs
4924
4925! **************************************************************************************************
4926!> \brief ...
4927!> \param pw1 ...
4928!> \param pw2 ...
4929! **************************************************************************************************
4930 SUBROUTINE pw_multiply_with_r3d_c3d_gs (pw1, pw2)
4931 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw1
4932 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
4933
4934 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
4935
4936 INTEGER :: handle
4937
4938 CALL timeset(routinen, handle)
4939
4940 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
4941 cpabort("Incompatible grids!")
4942
4943!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4944 pw1%array = pw1%array* real(pw2%array, kind=dp)
4945!$OMP END PARALLEL WORKSHARE
4946
4947 CALL timestop(handle)
4948
4949 END SUBROUTINE pw_multiply_with_r3d_c3d_gs
4950
4951! **************************************************************************************************
4952!> \brief Calculate integral over unit cell for functions in plane wave basis
4953!> only returns the real part of it ......
4954!> \param pw1 ...
4955!> \param pw2 ...
4956!> \param sumtype ...
4957!> \param just_sum ...
4958!> \param local_only ...
4959!> \return ...
4960!> \par History
4961!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
4962!> \author apsi
4963! **************************************************************************************************
4964 FUNCTION pw_integral_ab_r3d_c3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
4965
4966 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4967 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
4968 INTEGER, INTENT(IN), OPTIONAL :: sumtype
4969 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
4970 REAL(kind=dp) :: integral_value
4971
4972 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_r3d_c3d_gs'
4973
4974 INTEGER :: handle, loc_sumtype
4975 LOGICAL :: my_just_sum, my_local_only
4976
4977 CALL timeset(routinen, handle)
4978
4979 loc_sumtype = do_accurate_sum
4980 IF (PRESENT(sumtype)) loc_sumtype = sumtype
4981
4982 my_local_only = .false.
4983 IF (PRESENT(local_only)) my_local_only = local_only
4984
4985 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
4986 cpabort("Grids incompatible")
4987 END IF
4988
4989 my_just_sum = .false.
4990 IF (PRESENT(just_sum)) my_just_sum = just_sum
4991
4992 ! do standard sum
4993 IF (loc_sumtype == do_standard_sum) THEN
4994
4995 ! Do standard sum
4996
4997 integral_value = sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit
4998
4999 ELSE
5000
5001 ! Do accurate sum
5002 integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit
5003
5004 END IF
5005
5006 IF (.NOT. my_just_sum) THEN
5007 integral_value = integral_value*pw1%pw_grid%vol
5008 END IF
5009
5010
5011 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
5012 CALL pw1%pw_grid%para%group%sum(integral_value)
5013
5014 CALL timestop(handle)
5015
5016 END FUNCTION pw_integral_ab_r3d_c3d_gs
5017
5018! **************************************************************************************************
5019!> \brief copy a pw type variable
5020!> \param pw1 ...
5021!> \param pw2 ...
5022!> \par History
5023!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
5024!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
5025!> JGH (21-Feb-2003) : Code for generalized reference grids
5026!> \author apsi
5027!> \note
5028!> Currently only copying of respective types allowed,
5029!> in order to avoid errors
5030! **************************************************************************************************
5031 SUBROUTINE pw_copy_c1d_r1d_rs (pw1, pw2)
5032
5033 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
5034 TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2
5035
5036 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
5037
5038 INTEGER :: handle
5039 INTEGER :: i, j, ng, ng1, ng2, ns
5040
5041 CALL timeset(routinen, handle)
5042
5043 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
5044 cpabort("Both grids must be either spherical or non-spherical!")
5045 IF (pw1%pw_grid%spherical) &
5046 cpabort("Spherical grids only exist in reciprocal space!")
5047
5048 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
5049 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
5050 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
5051 ng1 = SIZE(pw1%array)
5052 ng2 = SIZE(pw2%array)
5053 ng = min(ng1, ng2)
5054!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
5055 pw2%array(1:ng) = real(pw1%array(1:ng), kind=dp)
5056!$OMP END PARALLEL WORKSHARE
5057 IF (ng2 > ng) THEN
5058!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
5059 pw2%array(ng + 1:ng2) = 0.0_dp
5060!$OMP END PARALLEL WORKSHARE
5061 END IF
5062 ELSE
5063 cpabort("Copies between spherical grids require compatible grids!")
5064 END IF
5065 ELSE
5066 ng1 = SIZE(pw1%array)
5067 ng2 = SIZE(pw2%array)
5068 ns = 2*max(ng1, ng2)
5069
5070 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
5071 IF (ng1 >= ng2) THEN
5072!$OMP PARALLEL DO DEFAULT(NONE) &
5073!$OMP PRIVATE(i,j) &
5074!$OMP SHARED(ng2, pw1, pw2)
5075 DO i = 1, ng2
5076 j = pw2%pw_grid%gidx(i)
5077 pw2%array(i) = real(pw1%array(j), kind=dp)
5078 END DO
5079!$OMP END PARALLEL DO
5080 ELSE
5081 CALL pw_zero(pw2)
5082!$OMP PARALLEL DO DEFAULT(NONE) &
5083!$OMP PRIVATE(i,j) &
5084!$OMP SHARED(ng1, pw1, pw2)
5085 DO i = 1, ng1
5086 j = pw2%pw_grid%gidx(i)
5087 pw2%array(j) = real(pw1%array(i), kind=dp)
5088 END DO
5089!$OMP END PARALLEL DO
5090 END IF
5091 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
5092 IF (ng1 >= ng2) THEN
5093!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
5094 DO i = 1, ng2
5095 j = pw1%pw_grid%gidx(i)
5096 pw2%array(i) = real(pw1%array(j), kind=dp)
5097 END DO
5098!$OMP END PARALLEL DO
5099 ELSE
5100 CALL pw_zero(pw2)
5101!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
5102 DO i = 1, ng1
5103 j = pw1%pw_grid%gidx(i)
5104 pw2%array(j) = real(pw1%array(i), kind=dp)
5105 END DO
5106!$OMP END PARALLEL DO
5107 END IF
5108 ELSE
5109 cpabort("Copy not implemented!")
5110 END IF
5111
5112 END IF
5113
5114 ELSE
5115!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
5116 pw2%array = real(pw1%array, kind=dp)
5117!$OMP END PARALLEL WORKSHARE
5118 END IF
5119
5120 CALL timestop(handle)
5121
5122 END SUBROUTINE pw_copy_c1d_r1d_rs
5123
5124! **************************************************************************************************
5125!> \brief ...
5126!> \param pw ...
5127!> \param array ...
5128! **************************************************************************************************
5129 SUBROUTINE pw_copy_to_array_c1d_r1d_rs (pw, array)
5130 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
5131 REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
5132
5133 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
5134
5135 INTEGER :: handle
5136
5137 CALL timeset(routinen, handle)
5138
5139!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
5140 array(:) = real(pw%array(:), kind=dp)
5141!$OMP END PARALLEL WORKSHARE
5142
5143 CALL timestop(handle)
5144 END SUBROUTINE pw_copy_to_array_c1d_r1d_rs
5145
5146! **************************************************************************************************
5147!> \brief ...
5148!> \param pw ...
5149!> \param array ...
5150! **************************************************************************************************
5151 SUBROUTINE pw_copy_from_array_c1d_r1d_rs (pw, array)
5152 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
5153 REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array
5154
5155 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
5156
5157 INTEGER :: handle
5158
5159 CALL timeset(routinen, handle)
5160
5161!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
5162 pw%array = cmplx(array, 0.0_dp, kind=dp)
5163!$OMP END PARALLEL WORKSHARE
5164
5165 CALL timestop(handle)
5166 END SUBROUTINE pw_copy_from_array_c1d_r1d_rs
5167
5168! **************************************************************************************************
5169!> \brief pw2 = alpha*pw1 + beta*pw2
5170!> alpha defaults to 1, beta defaults to 1
5171!> \param pw1 ...
5172!> \param pw2 ...
5173!> \param alpha ...
5174!> \param beta ...
5175!> \param allow_noncompatible_grids ...
5176!> \par History
5177!> JGH (21-Feb-2003) : added reference grid functionality
5178!> JGH (01-Dec-2007) : rename and remove complex alpha
5179!> \author apsi
5180!> \note
5181!> Currently only summing up of respective types allowed,
5182!> in order to avoid errors
5183! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_r1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
      ! pw2 <- alpha*pw1 + beta*pw2 with a complex source and a real
      ! destination; only the real part of pw1 contributes.
      ! alpha and beta both default to 1.

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! resolve optional arguments to their defaults
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! scale the destination by beta first (nothing to do for beta == 1)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! same grid object: plain elementwise accumulation (skipped for alpha == 0)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical grids: arrays share ordering, accumulate over the overlap
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: translate via pw2's gidx map
            IF (ng1 >= ng2) THEN
               ! gather from the larger source grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! scatter into the larger destination grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2 lives on the reference grid of pw1: translate via pw1's gidx map
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_r1d_rs
5332
5333! **************************************************************************************************
5334!> \brief pw_out = pw_out + alpha * pw1 * pw2
5335!> alpha defaults to 1
5336!> \param pw_out ...
5337!> \param pw1 ...
5338!> \param pw2 ...
5339!> \param alpha ...
5340!> \author JGH
5341! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_r1d_rs (pw_out, pw1, pw2, alpha)
      ! pw_out <- pw_out + alpha*pw1*pw2; alpha defaults to 1.
      ! The real field pw2 is promoted to complex (imaginary part zero).

      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      ! all three fields must live on the same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

! Older Intel LLVM compilers (< 2025.0) miscompile/fail on this WORKSHARE;
! fall back to the serial array statement for them.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_r1d_rs
5383
5384! **************************************************************************************************
!> \brief In-place product pw1 <- pw1*pw2 of a complex and a real 1D field
!> \param pw1 complex field, overwritten with the product
!> \param pw2 real field acting as multiplier; must share pw1's grid
5388! **************************************************************************************************
5389 SUBROUTINE pw_multiply_with_c1d_r1d_rs (pw1, pw2)
5390 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw1
5391 TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
5392
5393 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
5394
5395 INTEGER :: handle
5396
5397 CALL timeset(routinen, handle)
5398
5399 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
5400 cpabort("Incompatible grids!")
5401
5402!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
5403 pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
5404!$OMP END PARALLEL WORKSHARE
5405
5406 CALL timestop(handle)
5407
5408 END SUBROUTINE pw_multiply_with_c1d_r1d_rs
5409
5410! **************************************************************************************************
5411!> \brief Calculate integral over unit cell for functions in plane wave basis
5412!> only returns the real part of it ......
5413!> \param pw1 ...
5414!> \param pw2 ...
5415!> \param sumtype ...
5416!> \param just_sum ...
5417!> \param local_only ...
5418!> \return ...
5419!> \par History
5420!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
5421!> \author apsi
5422! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_r1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
      ! Integral of pw1*pw2 over the unit cell; only the real part of the
      ! product contributes. Both fields must live on the same grid object.

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype     ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_r1d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! accurate (compensated) summation unless the caller asks otherwise
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      END IF

      IF (.NOT. my_just_sum) THEN
         ! scale by the grid's volume element dvol
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! Halfspace storage keeps only one member of each +-G pair: double the
      ! sum and subtract the doubly counted G=0 term when it is held locally.
      ! NOTE(review): assumes element 1 is the g=0 coefficient when have_g0
      ! is set -- confirm against the grid layout.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! reduce over all ranks unless the caller wants the local contribution only
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_r1d_rs
5481! **************************************************************************************************
5482!> \brief copy a pw type variable
5483!> \param pw1 ...
5484!> \param pw2 ...
5485!> \par History
5486!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
5487!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
5488!> JGH (21-Feb-2003) : Code for generalized reference grids
5489!> \author apsi
5490!> \note
5491!> Currently only copying of respective types allowed,
5492!> in order to avoid errors
5493! **************************************************************************************************
5494 SUBROUTINE pw_copy_c1d_r1d_gs (pw1, pw2)
5495
5496 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
5497 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
5498
5499 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
5500
5501 INTEGER :: handle
5502 INTEGER :: i, j, ng, ng1, ng2, ns
5503
5504 CALL timeset(routinen, handle)
5505
5506 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
5507 cpabort("Both grids must be either spherical or non-spherical!")
5508
5509 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
5510 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
5511 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
5512 ng1 = SIZE(pw1%array)
5513 ng2 = SIZE(pw2%array)
5514 ng = min(ng1, ng2)
5515!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
5516 pw2%array(1:ng) = real(pw1%array(1:ng), kind=dp)
5517!$OMP END PARALLEL WORKSHARE
5518 IF (ng2 > ng) THEN
5519!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
5520 pw2%array(ng + 1:ng2) = 0.0_dp
5521!$OMP END PARALLEL WORKSHARE
5522 END IF
5523 ELSE
5524 cpabort("Copies between spherical grids require compatible grids!")
5525 END IF
5526 ELSE
5527 ng1 = SIZE(pw1%array)
5528 ng2 = SIZE(pw2%array)
5529 ns = 2*max(ng1, ng2)
5530
5531 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
5532 IF (ng1 >= ng2) THEN
5533!$OMP PARALLEL DO DEFAULT(NONE) &
5534!$OMP PRIVATE(i,j) &
5535!$OMP SHARED(ng2, pw1, pw2)
5536 DO i = 1, ng2
5537 j = pw2%pw_grid%gidx(i)
5538 pw2%array(i) = real(pw1%array(j), kind=dp)
5539 END DO
5540!$OMP END PARALLEL DO
5541 ELSE
5542 CALL pw_zero(pw2)
5543!$OMP PARALLEL DO DEFAULT(NONE) &
5544!$OMP PRIVATE(i,j) &
5545!$OMP SHARED(ng1, pw1, pw2)
5546 DO i = 1, ng1
5547 j = pw2%pw_grid%gidx(i)
5548 pw2%array(j) = real(pw1%array(i), kind=dp)
5549 END DO
5550!$OMP END PARALLEL DO
5551 END IF
5552 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
5553 IF (ng1 >= ng2) THEN
5554!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
5555 DO i = 1, ng2
5556 j = pw1%pw_grid%gidx(i)
5557 pw2%array(i) = real(pw1%array(j), kind=dp)
5558 END DO
5559!$OMP END PARALLEL DO
5560 ELSE
5561 CALL pw_zero(pw2)
5562!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
5563 DO i = 1, ng1
5564 j = pw1%pw_grid%gidx(i)
5565 pw2%array(j) = real(pw1%array(i), kind=dp)
5566 END DO
5567!$OMP END PARALLEL DO
5568 END IF
5569 ELSE
5570 cpabort("Copy not implemented!")
5571 END IF
5572
5573 END IF
5574
5575 ELSE
5576!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
5577 pw2%array = real(pw1%array, kind=dp)
5578!$OMP END PARALLEL WORKSHARE
5579 END IF
5580
5581 CALL timestop(handle)
5582
5583 END SUBROUTINE pw_copy_c1d_r1d_gs
5584
5585! **************************************************************************************************
5586!> \brief ...
5587!> \param pw ...
5588!> \param array ...
5589! **************************************************************************************************
5590 SUBROUTINE pw_copy_to_array_c1d_r1d_gs (pw, array)
5591 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
5592 REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
5593
5594 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
5595
5596 INTEGER :: handle
5597
5598 CALL timeset(routinen, handle)
5599
5600!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
5601 array(:) = real(pw%array(:), kind=dp)
5602!$OMP END PARALLEL WORKSHARE
5603
5604 CALL timestop(handle)
5605 END SUBROUTINE pw_copy_to_array_c1d_r1d_gs
5606
5607! **************************************************************************************************
5608!> \brief ...
5609!> \param pw ...
5610!> \param array ...
5611! **************************************************************************************************
5612 SUBROUTINE pw_copy_from_array_c1d_r1d_gs (pw, array)
5613 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
5614 REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array
5615
5616 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
5617
5618 INTEGER :: handle
5619
5620 CALL timeset(routinen, handle)
5621
5622!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
5623 pw%array = cmplx(array, 0.0_dp, kind=dp)
5624!$OMP END PARALLEL WORKSHARE
5625
5626 CALL timestop(handle)
5627 END SUBROUTINE pw_copy_from_array_c1d_r1d_gs
5628
5629! **************************************************************************************************
5630!> \brief pw2 = alpha*pw1 + beta*pw2
5631!> alpha defaults to 1, beta defaults to 1
5632!> \param pw1 ...
5633!> \param pw2 ...
5634!> \param alpha ...
5635!> \param beta ...
5636!> \param allow_noncompatible_grids ...
5637!> \par History
5638!> JGH (21-Feb-2003) : added reference grid functionality
5639!> JGH (01-Dec-2007) : rename and remove complex alpha
5640!> \author apsi
5641!> \note
5642!> Currently only summing up of respective types allowed,
5643!> in order to avoid errors
5644! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_r1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      ! pw2 = my_alpha*REAL(pw1) + my_beta*pw2; only the real part of the
      ! complex coefficients of pw1 contributes to the real data of pw2.
      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! scale pw2 by beta first (beta == 0 simply zeroes the data)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid: plain element-wise accumulation
         ! (alpha == 0 contributes nothing, so that case is skipped entirely)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! different grids: operate on the overlap of the two data sets
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff grids: the first ng coefficients correspond 1:1
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1's grid is the reference grid of pw2: pw2's gidx maps
            ! pw2-local indices onto the reference grid
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2's grid is the reference grid of pw1: pw1's gidx provides
            ! the index mapping
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_r1d_gs
5793
5794! **************************************************************************************************
5795!> \brief pw_out = pw_out + alpha * pw1 * pw2
5796!> alpha defaults to 1
5797!> \param pw_out ...
5798!> \param pw1 ...
5799!> \param pw2 ...
5800!> \param alpha ...
5801!> \author JGH
5802! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_r1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      ! pw_out = pw_out + my_alpha*pw1*pw2; the real data pw2 is promoted to
      ! complex with zero imaginary part before the product is formed
      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

! NOTE(review): the serial #else branch avoids OMP WORKSHARE on Intel LLVM
! compilers older than 2025 - presumably a compiler workaround, confirm
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_r1d_gs
5844
5845! **************************************************************************************************
5846!> \brief ...
5847!> \param pw1 ...
5848!> \param pw2 ...
5849! **************************************************************************************************
5850 SUBROUTINE pw_multiply_with_c1d_r1d_gs (pw1, pw2)
5851 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw1
5852 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
5853
5854 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
5855
5856 INTEGER :: handle
5857
5858 CALL timeset(routinen, handle)
5859
5860 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
5861 cpabort("Incompatible grids!")
5862
5863!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
5864 pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
5865!$OMP END PARALLEL WORKSHARE
5866
5867 CALL timestop(handle)
5868
5869 END SUBROUTINE pw_multiply_with_c1d_r1d_gs
5870
5871! **************************************************************************************************
5872!> \brief Calculate integral over unit cell for functions in plane wave basis
!> only the real part of the integral is returned
5874!> \param pw1 ...
5875!> \param pw2 ...
5876!> \param sumtype ...
5877!> \param just_sum ...
5878!> \param local_only ...
5879!> \return ...
5880!> \par History
5881!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
5882!> \author apsi
5883! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_r1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_r1d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! accurate (compensated) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! only the real part of pw1 enters the product with the real data pw2
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      END IF

      ! scale by the cell volume unless the caller wants the raw sum
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! half-space grids store only half of g-space: double the sum and
      ! correct for the G=0 term being counted twice (if held locally)
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! reduce over all ranks unless only the local contribution is wanted
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_r1d_gs
5942
5943! **************************************************************************************************
5944!> \brief ...
5945!> \param pw1 ...
5946!> \param pw2 ...
5947!> \return ...
5948! **************************************************************************************************
5949 FUNCTION pw_integral_a2b_c1d_r1d (pw1, pw2) RESULT(integral_value)
5950
5951 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
5952 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
5953 REAL(kind=dp) :: integral_value
5954
5955 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'
5956
5957 INTEGER :: handle
5958
5959 CALL timeset(routinen, handle)
5960
5961 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
5962 cpabort("Grids incompatible")
5963 END IF
5964
5965 integral_value = accurate_sum(real(conjg(pw1%array), kind=dp)*pw2%array*pw1%pw_grid%gsq)
5966 IF (pw1%pw_grid%grid_span == halfspace) THEN
5967 integral_value = 2.0_dp*integral_value
5968 END IF
5969
5970 integral_value = integral_value*pw1%pw_grid%vol
5971
5972 IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
5973 CALL pw1%pw_grid%para%group%sum(integral_value)
5974 CALL timestop(handle)
5975
5976 END FUNCTION pw_integral_a2b_c1d_r1d
5977! **************************************************************************************************
5978!> \brief copy a pw type variable
5979!> \param pw1 ...
5980!> \param pw2 ...
5981!> \par History
5982!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
5983!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
5984!> JGH (21-Feb-2003) : Code for generalized reference grids
5985!> \author apsi
5986!> \note
5987!> Currently only copying of respective types allowed,
5988!> in order to avoid errors
5989! **************************************************************************************************
5990 SUBROUTINE pw_copy_c1d_c1d_rs (pw1, pw2)
5991
5992 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
5993 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
5994
5995 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
5996
5997 INTEGER :: handle
5998 INTEGER :: i, j, ng, ng1, ng2, ns
5999
6000 CALL timeset(routinen, handle)
6001
6002 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
6003 cpabort("Both grids must be either spherical or non-spherical!")
6004 IF (pw1%pw_grid%spherical) &
6005 cpabort("Spherical grids only exist in reciprocal space!")
6006
6007 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
6008 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
6009 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
6010 ng1 = SIZE(pw1%array)
6011 ng2 = SIZE(pw2%array)
6012 ng = min(ng1, ng2)
6013!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
6014 pw2%array(1:ng) = pw1%array(1:ng)
6015!$OMP END PARALLEL WORKSHARE
6016 IF (ng2 > ng) THEN
6017!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
6018 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
6019!$OMP END PARALLEL WORKSHARE
6020 END IF
6021 ELSE
6022 cpabort("Copies between spherical grids require compatible grids!")
6023 END IF
6024 ELSE
6025 ng1 = SIZE(pw1%array)
6026 ng2 = SIZE(pw2%array)
6027 ns = 2*max(ng1, ng2)
6028
6029 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
6030 IF (ng1 >= ng2) THEN
6031!$OMP PARALLEL DO DEFAULT(NONE) &
6032!$OMP PRIVATE(i,j) &
6033!$OMP SHARED(ng2, pw1, pw2)
6034 DO i = 1, ng2
6035 j = pw2%pw_grid%gidx(i)
6036 pw2%array(i) = pw1%array(j)
6037 END DO
6038!$OMP END PARALLEL DO
6039 ELSE
6040 CALL pw_zero(pw2)
6041!$OMP PARALLEL DO DEFAULT(NONE) &
6042!$OMP PRIVATE(i,j) &
6043!$OMP SHARED(ng1, pw1, pw2)
6044 DO i = 1, ng1
6045 j = pw2%pw_grid%gidx(i)
6046 pw2%array(j) = pw1%array(i)
6047 END DO
6048!$OMP END PARALLEL DO
6049 END IF
6050 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
6051 IF (ng1 >= ng2) THEN
6052!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
6053 DO i = 1, ng2
6054 j = pw1%pw_grid%gidx(i)
6055 pw2%array(i) = pw1%array(j)
6056 END DO
6057!$OMP END PARALLEL DO
6058 ELSE
6059 CALL pw_zero(pw2)
6060!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
6061 DO i = 1, ng1
6062 j = pw1%pw_grid%gidx(i)
6063 pw2%array(j) = pw1%array(i)
6064 END DO
6065!$OMP END PARALLEL DO
6066 END IF
6067 ELSE
6068 cpabort("Copy not implemented!")
6069 END IF
6070
6071 END IF
6072
6073 ELSE
6074!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
6075 pw2%array = pw1%array
6076!$OMP END PARALLEL WORKSHARE
6077 END IF
6078
6079 CALL timestop(handle)
6080
6081 END SUBROUTINE pw_copy_c1d_c1d_rs
6082
6083! **************************************************************************************************
6084!> \brief ...
6085!> \param pw ...
6086!> \param array ...
6087! **************************************************************************************************
6088 SUBROUTINE pw_copy_to_array_c1d_c1d_rs (pw, array)
6089 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
6090 COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
6091
6092 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
6093
6094 INTEGER :: handle
6095
6096 CALL timeset(routinen, handle)
6097
6098!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6099 array(:) = pw%array(:)
6100!$OMP END PARALLEL WORKSHARE
6101
6102 CALL timestop(handle)
6103 END SUBROUTINE pw_copy_to_array_c1d_c1d_rs
6104
6105! **************************************************************************************************
6106!> \brief ...
6107!> \param pw ...
6108!> \param array ...
6109! **************************************************************************************************
6110 SUBROUTINE pw_copy_from_array_c1d_c1d_rs (pw, array)
6111 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
6112 COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array
6113
6114 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
6115
6116 INTEGER :: handle
6117
6118 CALL timeset(routinen, handle)
6119
6120!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6121 pw%array = array
6122!$OMP END PARALLEL WORKSHARE
6123
6124 CALL timestop(handle)
6125 END SUBROUTINE pw_copy_from_array_c1d_c1d_rs
6126
6127! **************************************************************************************************
6128!> \brief pw2 = alpha*pw1 + beta*pw2
6129!> alpha defaults to 1, beta defaults to 1
6130!> \param pw1 ...
6131!> \param pw2 ...
6132!> \param alpha ...
6133!> \param beta ...
6134!> \param allow_noncompatible_grids ...
6135!> \par History
6136!> JGH (21-Feb-2003) : added reference grid functionality
6137!> JGH (01-Dec-2007) : rename and remove complex alpha
6138!> \author apsi
6139!> \note
6140!> Currently only summing up of respective types allowed,
6141!> in order to avoid errors
6142! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_c1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      ! pw2 = my_alpha*pw1 + my_beta*pw2 for complex 1d real-space data.
      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! scale pw2 by beta first (beta == 0 simply zeroes the data)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid: plain element-wise accumulation
         ! (alpha == 0 contributes nothing, so that case is skipped entirely)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! different grids: operate on the overlap of the two data sets
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff grids: the first ng coefficients correspond 1:1
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1's grid is the reference grid of pw2: pw2's gidx maps
            ! pw2-local indices onto the reference grid
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2's grid is the reference grid of pw1: pw1's gidx provides
            ! the index mapping
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_c1d_rs
6291
6292! **************************************************************************************************
6293!> \brief pw_out = pw_out + alpha * pw1 * pw2
6294!> alpha defaults to 1
6295!> \param pw_out ...
6296!> \param pw1 ...
6297!> \param pw2 ...
6298!> \param alpha ...
6299!> \author JGH
6300! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_c1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      ! pw_out = pw_out + my_alpha*pw1*pw2 (element-wise complex product)
      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

! NOTE(review): the serial #else branch avoids OMP WORKSHARE on Intel LLVM
! compilers older than 2025 - presumably a compiler workaround, confirm
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* pw2%array
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_c1d_rs
6342
6343! **************************************************************************************************
6344!> \brief ...
6345!> \param pw1 ...
6346!> \param pw2 ...
6347! **************************************************************************************************
6348 SUBROUTINE pw_multiply_with_c1d_c1d_rs (pw1, pw2)
6349 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw1
6350 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
6351
6352 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
6353
6354 INTEGER :: handle
6355
6356 CALL timeset(routinen, handle)
6357
6358 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
6359 cpabort("Incompatible grids!")
6360
6361!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
6362 pw1%array = pw1%array* pw2%array
6363!$OMP END PARALLEL WORKSHARE
6364
6365 CALL timestop(handle)
6366
6367 END SUBROUTINE pw_multiply_with_c1d_c1d_rs
6368
6369! **************************************************************************************************
6370!> \brief Calculate integral over unit cell for functions in plane wave basis
!> only the real part of the integral is returned
6372!> \param pw1 ...
6373!> \param pw2 ...
6374!> \param sumtype ...
6375!> \param just_sum ...
6376!> \param local_only ...
6377!> \return ...
6378!> \par History
6379!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
6380!> \author apsi
6381! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_c1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_c1d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! accurate (compensated) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! only the real part of conjg(pw1)*pw2 is accumulated
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      ! scale by the volume element dvol unless only the raw sum is wanted
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! NOTE(review): half-space/g0 correction mirrors the g-space variant;
      ! grid_span/have_g0 semantics for real-space data to be confirmed
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! reduce over all ranks unless only the local contribution is wanted
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_c1d_rs
6441! **************************************************************************************************
6442!> \brief copy a pw type variable
6443!> \param pw1 ...
6444!> \param pw2 ...
6445!> \par History
6446!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
6447!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
6448!> JGH (21-Feb-2003) : Code for generalized reference grids
6449!> \author apsi
6450!> \note
6451!> Currently only copying of respective types allowed,
6452!> in order to avoid errors
6453! **************************************************************************************************
6454 SUBROUTINE pw_copy_c1d_c1d_gs (pw1, pw2)
6455
6456 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
6457 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
6458
6459 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
6460
6461 INTEGER :: handle
6462 INTEGER :: i, j, ng, ng1, ng2, ns
6463
6464 CALL timeset(routinen, handle)
6465
6466 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
6467 cpabort("Both grids must be either spherical or non-spherical!")
6468
6469 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
6470 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
6471 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
6472 ng1 = SIZE(pw1%array)
6473 ng2 = SIZE(pw2%array)
6474 ng = min(ng1, ng2)
6475!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
6476 pw2%array(1:ng) = pw1%array(1:ng)
6477!$OMP END PARALLEL WORKSHARE
6478 IF (ng2 > ng) THEN
6479!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
6480 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
6481!$OMP END PARALLEL WORKSHARE
6482 END IF
6483 ELSE
6484 cpabort("Copies between spherical grids require compatible grids!")
6485 END IF
6486 ELSE
6487 ng1 = SIZE(pw1%array)
6488 ng2 = SIZE(pw2%array)
6489 ns = 2*max(ng1, ng2)
6490
6491 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
6492 IF (ng1 >= ng2) THEN
6493!$OMP PARALLEL DO DEFAULT(NONE) &
6494!$OMP PRIVATE(i,j) &
6495!$OMP SHARED(ng2, pw1, pw2)
6496 DO i = 1, ng2
6497 j = pw2%pw_grid%gidx(i)
6498 pw2%array(i) = pw1%array(j)
6499 END DO
6500!$OMP END PARALLEL DO
6501 ELSE
6502 CALL pw_zero(pw2)
6503!$OMP PARALLEL DO DEFAULT(NONE) &
6504!$OMP PRIVATE(i,j) &
6505!$OMP SHARED(ng1, pw1, pw2)
6506 DO i = 1, ng1
6507 j = pw2%pw_grid%gidx(i)
6508 pw2%array(j) = pw1%array(i)
6509 END DO
6510!$OMP END PARALLEL DO
6511 END IF
6512 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
6513 IF (ng1 >= ng2) THEN
6514!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
6515 DO i = 1, ng2
6516 j = pw1%pw_grid%gidx(i)
6517 pw2%array(i) = pw1%array(j)
6518 END DO
6519!$OMP END PARALLEL DO
6520 ELSE
6521 CALL pw_zero(pw2)
6522!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
6523 DO i = 1, ng1
6524 j = pw1%pw_grid%gidx(i)
6525 pw2%array(j) = pw1%array(i)
6526 END DO
6527!$OMP END PARALLEL DO
6528 END IF
6529 ELSE
6530 CALL pw_copy_match(pw1, pw2)
6531 END IF
6532
6533 END IF
6534
6535 ELSE
6536!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
6537 pw2%array = pw1%array
6538!$OMP END PARALLEL WORKSHARE
6539 END IF
6540
6541 CALL timestop(handle)
6542
6543 END SUBROUTINE pw_copy_c1d_c1d_gs
6544
6545! **************************************************************************************************
6546!> \brief ...
6547!> \param pw ...
6548!> \param array ...
6549! **************************************************************************************************
6550 SUBROUTINE pw_copy_to_array_c1d_c1d_gs (pw, array)
6551 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
6552 COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
6553
6554 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
6555
6556 INTEGER :: handle
6557
6558 CALL timeset(routinen, handle)
6559
6560!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6561 array(:) = pw%array(:)
6562!$OMP END PARALLEL WORKSHARE
6563
6564 CALL timestop(handle)
6565 END SUBROUTINE pw_copy_to_array_c1d_c1d_gs
6566
6567! **************************************************************************************************
6568!> \brief ...
6569!> \param pw ...
6570!> \param array ...
6571! **************************************************************************************************
6572 SUBROUTINE pw_copy_from_array_c1d_c1d_gs (pw, array)
6573 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
6574 COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array
6575
6576 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
6577
6578 INTEGER :: handle
6579
6580 CALL timeset(routinen, handle)
6581
6582!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6583 pw%array = array
6584!$OMP END PARALLEL WORKSHARE
6585
6586 CALL timestop(handle)
6587 END SUBROUTINE pw_copy_from_array_c1d_c1d_gs
6588
6589! **************************************************************************************************
6590!> \brief pw2 = alpha*pw1 + beta*pw2
6591!> alpha defaults to 1, beta defaults to 1
6592!> \param pw1 ...
6593!> \param pw2 ...
6594!> \param alpha ...
6595!> \param beta ...
6596!> \param allow_noncompatible_grids ...
6597!> \par History
6598!> JGH (21-Feb-2003) : added reference grid functionality
6599!> JGH (01-Dec-2007) : rename and remove complex alpha
6600!> \author apsi
6601!> \note
6602!> Currently only summing up of respective types allowed,
6603!> in order to avoid errors
6604! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_c1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! Computes pw2 = alpha*pw1 + beta*pw2 for 1D complex reciprocal-space data.
      ! alpha and beta both default to 1. For data on different (but compatible,
      ! or explicitly allowed) grids, only the overlapping min(size1, size2)
      ! coefficients are combined; the mapping goes through the reference-grid
      ! index table gidx (see below).
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! Resolve optional arguments; both scaling factors default to 1.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Pre-scale pw2 by beta first, so the branches below only have to add
      ! alpha*pw1. beta == 0 zeroes pw2 outright instead of multiplying.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Fast path: both data sets live on the very same grid object, so the
         ! arrays are conformant. alpha == 0 requires no work at all.
         ! (The PRIVATE(i) clauses below are redundant; i is not referenced.)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different grid objects: combine only the overlapping coefficients.
         ! Elements of the larger array beyond ng are left untouched.
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids: g-vectors are assumed to be ordered
            ! identically on both grids, so the overlap is simply the first
            ! ng elements (no index mapping needed).
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw2's grid declares pw1's grid as its reference grid: translate
            ! indices through pw2's gidx table. Whether gidx is used to gather
            ! (read pw1 at mapped index) or scatter (write pw2 at mapped index)
            ! depends on which array is larger.
            ! NOTE(review): this assumes gidx can be applied in both directions;
            ! confirm the gidx convention in the pw_grids module.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw1's grid references pw2's grid, so the mapping goes
            ! through pw1's gidx table instead.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! Non-spherical grids without a reference relation cannot be mapped.
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_c1d_gs
6753
6754! **************************************************************************************************
6755!> \brief pw_out = pw_out + alpha * pw1 * pw2
6756!> alpha defaults to 1
6757!> \param pw_out ...
6758!> \param pw1 ...
6759!> \param pw2 ...
6760!> \param alpha ...
6761!> \author JGH
6762! **************************************************************************************************
6763 SUBROUTINE pw_multiply_c1d_c1d_gs (pw_out, pw1, pw2, alpha)
6764
6765 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw_out
6766 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
6767 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
6768 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
6769
6770 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
6771
6772 INTEGER :: handle
6773 REAL(KIND=dp) :: my_alpha
6774
6775 CALL timeset(routinen, handle)
6776
6777 my_alpha = 1.0_dp
6778 IF (PRESENT(alpha)) my_alpha = alpha
6779
6780 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
6781 cpabort("pw_multiply not implemented for non-identical grids!")
6782
6783#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
6784 IF (my_alpha == 1.0_dp) THEN
6785!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
6786 pw_out%array = pw_out%array + pw1%array* pw2%array
6787!$OMP END PARALLEL WORKSHARE
6788 ELSE
6789!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
6790 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
6791!$OMP END PARALLEL WORKSHARE
6792 END IF
6793#else
6794 IF (my_alpha == 1.0_dp) THEN
6795 pw_out%array = pw_out%array + pw1%array* pw2%array
6796 ELSE
6797 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
6798 END IF
6799#endif
6800
6801 CALL timestop(handle)
6802
6803 END SUBROUTINE pw_multiply_c1d_c1d_gs
6804
6805! **************************************************************************************************
6806!> \brief ...
6807!> \param pw1 ...
6808!> \param pw2 ...
6809! **************************************************************************************************
6810 SUBROUTINE pw_multiply_with_c1d_c1d_gs (pw1, pw2)
6811 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw1
6812 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
6813
6814 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
6815
6816 INTEGER :: handle
6817
6818 CALL timeset(routinen, handle)
6819
6820 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
6821 cpabort("Incompatible grids!")
6822
6823!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
6824 pw1%array = pw1%array* pw2%array
6825!$OMP END PARALLEL WORKSHARE
6826
6827 CALL timestop(handle)
6828
6829 END SUBROUTINE pw_multiply_with_c1d_c1d_gs
6830
6831! **************************************************************************************************
6832!> \brief Calculate integral over unit cell for functions in plane wave basis
6833!> only returns the real part of it ......
6834!> \param pw1 ...
6835!> \param pw2 ...
6836!> \param sumtype ...
6837!> \param just_sum ...
6838!> \param local_only ...
6839!> \return ...
6840!> \par History
6841!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
6842!> \author apsi
6843! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_c1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Real part of the unit-cell integral <pw1|pw2> for 1D complex
      ! reciprocal-space data: sum over G of Re[conjg(c1(G))*c2(G)], scaled by
      ! the cell volume (unless just_sum), with the half-space double counting
      ! and G=0 correction applied, and reduced over all ranks for distributed
      ! grids (unless local_only).
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype      ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c1d_c1d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Resolve optionals: accurate summation, volume scaling and global
      ! reduction are the defaults.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      IF (.NOT. my_just_sum) THEN
         ! Turn the plain coefficient sum into an integral over the cell.
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      IF (pw1%pw_grid%grid_span == halfspace) THEN
         ! Half-space storage keeps only one of each +G/-G pair: double the
         ! sum, then remove the G=0 term that was counted twice (present only
         ! on the rank that holds G=0).
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_c1d_gs
6903
6904! **************************************************************************************************
6905!> \brief ...
6906!> \param pw1 ...
6907!> \param pw2 ...
6908!> \return ...
6909! **************************************************************************************************
6910 FUNCTION pw_integral_a2b_c1d_c1d (pw1, pw2) RESULT(integral_value)
6911
6912 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
6913 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
6914 REAL(kind=dp) :: integral_value
6915
6916 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'
6917
6918 INTEGER :: handle
6919
6920 CALL timeset(routinen, handle)
6921
6922 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
6923 cpabort("Grids incompatible")
6924 END IF
6925
6926 integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp)*pw1%pw_grid%gsq)
6927 IF (pw1%pw_grid%grid_span == halfspace) THEN
6928 integral_value = 2.0_dp*integral_value
6929 END IF
6930
6931 integral_value = integral_value*pw1%pw_grid%vol
6932
6933 IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
6934 CALL pw1%pw_grid%para%group%sum(integral_value)
6935 CALL timestop(handle)
6936
6937 END FUNCTION pw_integral_a2b_c1d_c1d
6938! **************************************************************************************************
6939!> \brief copy a pw type variable
6940!> \param pw1 ...
6941!> \param pw2 ...
6942!> \par History
6943!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
6944!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
6945!> JGH (21-Feb-2003) : Code for generalized reference grids
6946!> \author apsi
6947!> \note
6948!> Currently only copying of respective types allowed,
6949!> in order to avoid errors
6950! **************************************************************************************************
6951 SUBROUTINE pw_copy_c3d_r3d_rs (pw1, pw2)
6952
6953 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
6954 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
6955
6956 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
6957
6958 INTEGER :: handle
6959
6960 CALL timeset(routinen, handle)
6961
6962 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
6963 cpabort("Both grids must be either spherical or non-spherical!")
6964 IF (pw1%pw_grid%spherical) &
6965 cpabort("Spherical grids only exist in reciprocal space!")
6966
6967 IF (any(shape(pw2%array) /= shape(pw1%array))) &
6968 cpabort("3D grids must be compatible!")
6969 IF (pw1%pw_grid%spherical) &
6970 cpabort("3D grids must not be spherical!")
6971!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
6972 pw2%array(:, :, :) = real(pw1%array(:, :, :), kind=dp)
6973!$OMP END PARALLEL WORKSHARE
6974
6975 CALL timestop(handle)
6976
6977 END SUBROUTINE pw_copy_c3d_r3d_rs
6978
6979! **************************************************************************************************
6980!> \brief ...
6981!> \param pw ...
6982!> \param array ...
6983! **************************************************************************************************
6984 SUBROUTINE pw_copy_to_array_c3d_r3d_rs (pw, array)
6985 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
6986 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
6987
6988 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
6989
6990 INTEGER :: handle
6991
6992 CALL timeset(routinen, handle)
6993
6994!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
6995 array(:, :, :) = real(pw%array(:, :, :), kind=dp)
6996!$OMP END PARALLEL WORKSHARE
6997
6998 CALL timestop(handle)
6999 END SUBROUTINE pw_copy_to_array_c3d_r3d_rs
7000
7001! **************************************************************************************************
7002!> \brief ...
7003!> \param pw ...
7004!> \param array ...
7005! **************************************************************************************************
7006 SUBROUTINE pw_copy_from_array_c3d_r3d_rs (pw, array)
7007 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
7008 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7009
7010 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7011
7012 INTEGER :: handle
7013
7014 CALL timeset(routinen, handle)
7015
7016!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7017 pw%array = cmplx(array, 0.0_dp, kind=dp)
7018!$OMP END PARALLEL WORKSHARE
7019
7020 CALL timestop(handle)
7021 END SUBROUTINE pw_copy_from_array_c3d_r3d_rs
7022
7023! **************************************************************************************************
7024!> \brief pw2 = alpha*pw1 + beta*pw2
7025!> alpha defaults to 1, beta defaults to 1
7026!> \param pw1 ...
7027!> \param pw2 ...
7028!> \param alpha ...
7029!> \param beta ...
7030!> \param allow_noncompatible_grids ...
7031!> \par History
7032!> JGH (21-Feb-2003) : added reference grid functionality
7033!> JGH (01-Dec-2007) : rename and remove complex alpha
7034!> \author apsi
7035!> \note
7036!> Currently only summing up of respective types allowed,
7037!> in order to avoid errors
7038! **************************************************************************************************
   SUBROUTINE pw_axpy_c3d_r3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! pw2 = alpha*REAL(pw1) + beta*pw2 for 3D data: only the real part of
      ! the complex field pw1 is accumulated, the imaginary part is discarded.
      ! alpha and beta both default to 1.
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routinen, handle)

      ! Resolve optional arguments; both scaling factors default to 1.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! First scale pw2 by beta, so the branches below only add alpha*pw1.
      ! beta == 0 zeroes pw2 outright instead of multiplying.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! Same grid object: conformant whole-array update; alpha == 0 is a no-op.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different grid objects: only same-shape 3D data can be combined.
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE
            ! NOTE: unlike the same-grid branch, alpha == 0 is not short-circuited here.
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c3d_r3d_rs
7108
7109! **************************************************************************************************
7110!> \brief pw_out = pw_out + alpha * pw1 * pw2
7111!> alpha defaults to 1
7112!> \param pw_out ...
7113!> \param pw1 ...
7114!> \param pw2 ...
7115!> \param alpha ...
7116!> \author JGH
7117! **************************************************************************************************
7118 SUBROUTINE pw_multiply_c3d_r3d_rs (pw_out, pw1, pw2, alpha)
7119
7120 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw_out
7121 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7122 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
7123 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
7124
7125 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
7126
7127 INTEGER :: handle
7128 REAL(KIND=dp) :: my_alpha
7129
7130 CALL timeset(routinen, handle)
7131
7132 my_alpha = 1.0_dp
7133 IF (PRESENT(alpha)) my_alpha = alpha
7134
7135 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
7136 cpabort("pw_multiply not implemented for non-identical grids!")
7137
7138#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
7139 IF (my_alpha == 1.0_dp) THEN
7140!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
7141 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7142!$OMP END PARALLEL WORKSHARE
7143 ELSE
7144!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
7145 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7146!$OMP END PARALLEL WORKSHARE
7147 END IF
7148#else
7149 IF (my_alpha == 1.0_dp) THEN
7150 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7151 ELSE
7152 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7153 END IF
7154#endif
7155
7156 CALL timestop(handle)
7157
7158 END SUBROUTINE pw_multiply_c3d_r3d_rs
7159
7160! **************************************************************************************************
7161!> \brief ...
7162!> \param pw1 ...
7163!> \param pw2 ...
7164! **************************************************************************************************
7165 SUBROUTINE pw_multiply_with_c3d_r3d_rs (pw1, pw2)
7166 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw1
7167 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
7168
7169 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7170
7171 INTEGER :: handle
7172
7173 CALL timeset(routinen, handle)
7174
7175 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7176 cpabort("Incompatible grids!")
7177
7178!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7179 pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7180!$OMP END PARALLEL WORKSHARE
7181
7182 CALL timestop(handle)
7183
7184 END SUBROUTINE pw_multiply_with_c3d_r3d_rs
7185
7186! **************************************************************************************************
7187!> \brief Calculate integral over unit cell for functions in plane wave basis
7188!> only returns the real part of it ......
7189!> \param pw1 ...
7190!> \param pw2 ...
7191!> \param sumtype ...
7192!> \param just_sum ...
7193!> \param local_only ...
7194!> \return ...
7195!> \par History
7196!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7197!> \author apsi
7198! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_r3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Real part of the unit-cell integral of pw1*pw2 for 3D real-space data:
      ! sum over grid points of Re[pw1]*pw2 (imaginary part of pw1 discarded),
      ! scaled by the per-point volume element dvol (unless just_sum), and
      ! reduced over all ranks for distributed grids (unless local_only).
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype       ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_r3d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Resolve optionals: accurate summation, volume scaling and global
      ! reduction are the defaults.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      END IF

      IF (.NOT. my_just_sum) THEN
         ! Real-space quadrature: each grid point carries the volume element dvol.
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! No half-space/G=0 correction here: real-space data, not reciprocal space.

      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_r3d_rs
7252! **************************************************************************************************
7253!> \brief copy a pw type variable
7254!> \param pw1 ...
7255!> \param pw2 ...
7256!> \par History
7257!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
7258!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
7259!> JGH (21-Feb-2003) : Code for generalized reference grids
7260!> \author apsi
7261!> \note
7262!> Currently only copying of respective types allowed,
7263!> in order to avoid errors
7264! **************************************************************************************************
7265 SUBROUTINE pw_copy_c3d_r3d_gs (pw1, pw2)
7266
7267 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7268 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
7269
7270 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
7271
7272 INTEGER :: handle
7273
7274 CALL timeset(routinen, handle)
7275
7276 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
7277 cpabort("Both grids must be either spherical or non-spherical!")
7278
7279 IF (any(shape(pw2%array) /= shape(pw1%array))) &
7280 cpabort("3D grids must be compatible!")
7281 IF (pw1%pw_grid%spherical) &
7282 cpabort("3D grids must not be spherical!")
7283!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7284 pw2%array(:, :, :) = real(pw1%array(:, :, :), kind=dp)
7285!$OMP END PARALLEL WORKSHARE
7286
7287 CALL timestop(handle)
7288
7289 END SUBROUTINE pw_copy_c3d_r3d_gs
7290
7291! **************************************************************************************************
7292!> \brief ...
7293!> \param pw ...
7294!> \param array ...
7295! **************************************************************************************************
7296 SUBROUTINE pw_copy_to_array_c3d_r3d_gs (pw, array)
7297 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7298 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
7299
7300 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
7301
7302 INTEGER :: handle
7303
7304 CALL timeset(routinen, handle)
7305
7306!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7307 array(:, :, :) = real(pw%array(:, :, :), kind=dp)
7308!$OMP END PARALLEL WORKSHARE
7309
7310 CALL timestop(handle)
7311 END SUBROUTINE pw_copy_to_array_c3d_r3d_gs
7312
7313! **************************************************************************************************
7314!> \brief ...
7315!> \param pw ...
7316!> \param array ...
7317! **************************************************************************************************
7318 SUBROUTINE pw_copy_from_array_c3d_r3d_gs (pw, array)
7319 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7320 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7321
7322 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7323
7324 INTEGER :: handle
7325
7326 CALL timeset(routinen, handle)
7327
7328!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7329 pw%array = cmplx(array, 0.0_dp, kind=dp)
7330!$OMP END PARALLEL WORKSHARE
7331
7332 CALL timestop(handle)
7333 END SUBROUTINE pw_copy_from_array_c3d_r3d_gs
7334
7335! **************************************************************************************************
7336!> \brief pw2 = alpha*pw1 + beta*pw2
7337!> alpha defaults to 1, beta defaults to 1
7338!> \param pw1 ...
7339!> \param pw2 ...
7340!> \param alpha ...
7341!> \param beta ...
7342!> \param allow_noncompatible_grids ...
7343!> \par History
7344!> JGH (21-Feb-2003) : added reference grid functionality
7345!> JGH (01-Dec-2007) : rename and remove complex alpha
7346!> \author apsi
7347!> \note
7348!> Currently only summing up of respective types allowed,
7349!> in order to avoid errors
7350! **************************************************************************************************
   SUBROUTINE pw_axpy_c3d_r3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! pw2 = alpha*REAL(pw1) + beta*pw2 for 3D g-space data: only the real
      ! part of the complex field pw1 is accumulated, the imaginary part is
      ! discarded. alpha and beta both default to 1.
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routinen, handle)

      ! Resolve optional arguments; both scaling factors default to 1.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! First scale pw2 by beta, so the branches below only add alpha*pw1.
      ! beta == 0 zeroes pw2 outright instead of multiplying.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! Same grid object: conformant whole-array update; alpha == 0 is a no-op.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different grid objects: only same-shape 3D data can be combined.
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE
            ! NOTE: unlike the same-grid branch, alpha == 0 is not short-circuited here.
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c3d_r3d_gs
7420
7421! **************************************************************************************************
7422!> \brief pw_out = pw_out + alpha * pw1 * pw2
7423!> alpha defaults to 1
7424!> \param pw_out ...
7425!> \param pw1 ...
7426!> \param pw2 ...
7427!> \param alpha ...
7428!> \author JGH
7429! **************************************************************************************************
7430 SUBROUTINE pw_multiply_c3d_r3d_gs (pw_out, pw1, pw2, alpha)
7431
7432 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw_out
7433 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7434 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
7435 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
7436
7437 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
7438
7439 INTEGER :: handle
7440 REAL(KIND=dp) :: my_alpha
7441
7442 CALL timeset(routinen, handle)
7443
7444 my_alpha = 1.0_dp
7445 IF (PRESENT(alpha)) my_alpha = alpha
7446
7447 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
7448 cpabort("pw_multiply not implemented for non-identical grids!")
7449
7450#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
7451 IF (my_alpha == 1.0_dp) THEN
7452!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
7453 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7454!$OMP END PARALLEL WORKSHARE
7455 ELSE
7456!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
7457 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7458!$OMP END PARALLEL WORKSHARE
7459 END IF
7460#else
7461 IF (my_alpha == 1.0_dp) THEN
7462 pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7463 ELSE
7464 pw_out%array = pw_out%array + my_alpha*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7465 END IF
7466#endif
7467
7468 CALL timestop(handle)
7469
7470 END SUBROUTINE pw_multiply_c3d_r3d_gs
7471
7472! **************************************************************************************************
7473!> \brief ...
7474!> \param pw1 ...
7475!> \param pw2 ...
7476! **************************************************************************************************
7477 SUBROUTINE pw_multiply_with_c3d_r3d_gs (pw1, pw2)
7478 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw1
7479 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
7480
7481 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7482
7483 INTEGER :: handle
7484
7485 CALL timeset(routinen, handle)
7486
7487 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7488 cpabort("Incompatible grids!")
7489
7490!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7491 pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
7492!$OMP END PARALLEL WORKSHARE
7493
7494 CALL timestop(handle)
7495
7496 END SUBROUTINE pw_multiply_with_c3d_r3d_gs
7497
7498! **************************************************************************************************
7499!> \brief Calculate integral over unit cell for functions in plane wave basis
7500!> only returns the real part of it ......
7501!> \param pw1 ...
7502!> \param pw2 ...
7503!> \param sumtype ...
7504!> \param just_sum ...
7505!> \param local_only ...
7506!> \return ...
7507!> \par History
7508!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7509!> \author apsi
7510! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_r3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Real part of the unit-cell integral of pw1*pw2 for 3D g-space data:
      ! sum over grid points of Re[pw1]*pw2 (imaginary part of pw1 discarded),
      ! scaled by the cell volume vol (unless just_sum), and reduced over all
      ! ranks for distributed grids (unless local_only).
      ! NOTE(review): this variant scales with vol while the rs variant uses
      ! dvol — confirm that this is intended for g-space data.
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype       ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_r3d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Resolve optionals: accurate summation, volume scaling and global
      ! reduction are the defaults.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      END IF

      IF (.NOT. my_just_sum) THEN
         ! Turn the plain sum into an integral by scaling with the cell volume.
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! No half-space/G=0 correction applied for the 3D data layout.

      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_r3d_gs
7564
7565! **************************************************************************************************
7566!> \brief copy a pw type variable
7567!> \param pw1 ...
7568!> \param pw2 ...
7569!> \par History
7570!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
7571!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
7572!> JGH (21-Feb-2003) : Code for generalized reference grids
7573!> \author apsi
7574!> \note
7575!> Currently only copying of respective types allowed,
7576!> in order to avoid errors
7577! **************************************************************************************************
7578 SUBROUTINE pw_copy_c3d_c3d_rs (pw1, pw2)
7579
7580 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7581 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
7582
7583 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
7584
7585 INTEGER :: handle
7586
7587 CALL timeset(routinen, handle)
7588
7589 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
7590 cpabort("Both grids must be either spherical or non-spherical!")
7591 IF (pw1%pw_grid%spherical) &
7592 cpabort("Spherical grids only exist in reciprocal space!")
7593
7594 IF (any(shape(pw2%array) /= shape(pw1%array))) &
7595 cpabort("3D grids must be compatible!")
7596 IF (pw1%pw_grid%spherical) &
7597 cpabort("3D grids must not be spherical!")
7598!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7599 pw2%array(:, :, :) = pw1%array(:, :, :)
7600!$OMP END PARALLEL WORKSHARE
7601
7602 CALL timestop(handle)
7603
7604 END SUBROUTINE pw_copy_c3d_c3d_rs
7605
7606! **************************************************************************************************
7607!> \brief ...
7608!> \param pw ...
7609!> \param array ...
7610! **************************************************************************************************
7611 SUBROUTINE pw_copy_to_array_c3d_c3d_rs (pw, array)
7612 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
7613 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
7614
7615 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
7616
7617 INTEGER :: handle
7618
7619 CALL timeset(routinen, handle)
7620
7621!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7622 array(:, :, :) = pw%array(:, :, :)
7623!$OMP END PARALLEL WORKSHARE
7624
7625 CALL timestop(handle)
7626 END SUBROUTINE pw_copy_to_array_c3d_c3d_rs
7627
7628! **************************************************************************************************
7629!> \brief ...
7630!> \param pw ...
7631!> \param array ...
7632! **************************************************************************************************
7633 SUBROUTINE pw_copy_from_array_c3d_c3d_rs (pw, array)
7634 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
7635 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7636
7637 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7638
7639 INTEGER :: handle
7640
7641 CALL timeset(routinen, handle)
7642
7643!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7644 pw%array = array
7645!$OMP END PARALLEL WORKSHARE
7646
7647 CALL timestop(handle)
7648 END SUBROUTINE pw_copy_from_array_c3d_c3d_rs
7649
7650! **************************************************************************************************
7651!> \brief pw2 = alpha*pw1 + beta*pw2
7652!> alpha defaults to 1, beta defaults to 1
7653!> \param pw1 ...
7654!> \param pw2 ...
7655!> \param alpha ...
7656!> \param beta ...
7657!> \param allow_noncompatible_grids ...
7658!> \par History
7659!> JGH (21-Feb-2003) : added reference grid functionality
7660!> JGH (01-Dec-2007) : rename and remove complex alpha
7661!> \author apsi
7662!> \note
7663!> Currently only summing up of respective types allowed,
7664!> in order to avoid errors
7665! **************************************************************************************************
   SUBROUTINE pw_axpy_c3d_c3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
      ! pw2 = alpha*pw1 + beta*pw2 for 3d complex real-space data.
      ! alpha and beta both default to 1 (plain accumulation pw2 = pw2 + pw1).

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Scale pw2 first; beta = 0 is handled as an explicit zeroing rather
      ! than a multiplication by zero.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! Same grid object: add directly; alpha = 0 skips the addition entirely.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different grid objects: the 3d arrays must still match element-wise.
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c3d_c3d_rs
7735
7736! **************************************************************************************************
7737!> \brief pw_out = pw_out + alpha * pw1 * pw2
7738!> alpha defaults to 1
7739!> \param pw_out ...
7740!> \param pw1 ...
7741!> \param pw2 ...
7742!> \param alpha ...
7743!> \author JGH
7744! **************************************************************************************************
   SUBROUTINE pw_multiply_c3d_c3d_rs (pw_out, pw1, pw2, alpha)
      ! pw_out = pw_out + alpha * pw1 * pw2 (element-wise); alpha defaults to 1.
      ! All three fields must share the same grid object.

      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! NOTE(review): serial fallback for Intel LLVM compilers older than 2025,
      ! presumably a WORKSHARE code-generation issue — confirm before removing.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* pw2%array
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c3d_c3d_rs
7786
7787! **************************************************************************************************
7788!> \brief ...
7789!> \param pw1 ...
7790!> \param pw2 ...
7791! **************************************************************************************************
7792 SUBROUTINE pw_multiply_with_c3d_c3d_rs (pw1, pw2)
7793 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw1
7794 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
7795
7796 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7797
7798 INTEGER :: handle
7799
7800 CALL timeset(routinen, handle)
7801
7802 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7803 cpabort("Incompatible grids!")
7804
7805!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7806 pw1%array = pw1%array* pw2%array
7807!$OMP END PARALLEL WORKSHARE
7808
7809 CALL timestop(handle)
7810
7811 END SUBROUTINE pw_multiply_with_c3d_c3d_rs
7812
7813! **************************************************************************************************
7814!> \brief Calculate integral over unit cell for functions in plane wave basis
7815!> only returns the real part of it ......
7816!> \param pw1 ...
7817!> \param pw2 ...
7818!> \param sumtype ...
7819!> \param just_sum ...
7820!> \param local_only ...
7821!> \return ...
7822!> \par History
7823!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7824!> \author apsi
7825! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_c3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
      ! Real part of the cell integral of conjg(pw1)*pw2 in real space.
      ! sumtype selects standard vs. accurate (compensated) summation;
      ! just_sum skips the volume-element weighting; local_only skips the
      ! reduction over the process group.

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_c3d_rs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Accurate summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      IF (.NOT. my_just_sum) THEN
         ! Real-space quadrature: weight by the volume element per grid point
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! Sum the partial results of all ranks for distributed grids
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_c3d_rs
7880! **************************************************************************************************
7881!> \brief copy a pw type variable
7882!> \param pw1 ...
7883!> \param pw2 ...
7884!> \par History
7885!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
7886!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
7887!> JGH (21-Feb-2003) : Code for generalized reference grids
7888!> \author apsi
7889!> \note
7890!> Currently only copying of respective types allowed,
7891!> in order to avoid errors
7892! **************************************************************************************************
   SUBROUTINE pw_copy_c3d_c3d_gs (pw1, pw2)
      ! Copy the 3d complex g-space data of pw1 into pw2.
      ! Both grids must be non-spherical with identically shaped 3d arrays.

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (any(shape(pw2%array) /= shape(pw1%array))) &
         cpabort("3D grids must be compatible!")
      ! Spherical cutoff grids are not representable as full 3d arrays
      IF (pw1%pw_grid%spherical) &
         cpabort("3D grids must not be spherical!")
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array(:, :, :) = pw1%array(:, :, :)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c3d_c3d_gs
7918
7919! **************************************************************************************************
7920!> \brief ...
7921!> \param pw ...
7922!> \param array ...
7923! **************************************************************************************************
7924 SUBROUTINE pw_copy_to_array_c3d_c3d_gs (pw, array)
7925 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7926 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
7927
7928 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
7929
7930 INTEGER :: handle
7931
7932 CALL timeset(routinen, handle)
7933
7934!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7935 array(:, :, :) = pw%array(:, :, :)
7936!$OMP END PARALLEL WORKSHARE
7937
7938 CALL timestop(handle)
7939 END SUBROUTINE pw_copy_to_array_c3d_c3d_gs
7940
7941! **************************************************************************************************
7942!> \brief ...
7943!> \param pw ...
7944!> \param array ...
7945! **************************************************************************************************
7946 SUBROUTINE pw_copy_from_array_c3d_c3d_gs (pw, array)
7947 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7948 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7949
7950 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7951
7952 INTEGER :: handle
7953
7954 CALL timeset(routinen, handle)
7955
7956!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7957 pw%array = array
7958!$OMP END PARALLEL WORKSHARE
7959
7960 CALL timestop(handle)
7961 END SUBROUTINE pw_copy_from_array_c3d_c3d_gs
7962
7963! **************************************************************************************************
7964!> \brief pw2 = alpha*pw1 + beta*pw2
7965!> alpha defaults to 1, beta defaults to 1
7966!> \param pw1 ...
7967!> \param pw2 ...
7968!> \param alpha ...
7969!> \param beta ...
7970!> \param allow_noncompatible_grids ...
7971!> \par History
7972!> JGH (21-Feb-2003) : added reference grid functionality
7973!> JGH (01-Dec-2007) : rename and remove complex alpha
7974!> \author apsi
7975!> \note
7976!> Currently only summing up of respective types allowed,
7977!> in order to avoid errors
7978! **************************************************************************************************
   SUBROUTINE pw_axpy_c3d_c3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
      ! pw2 = alpha*pw1 + beta*pw2 for 3d complex g-space data.
      ! alpha and beta both default to 1 (plain accumulation pw2 = pw2 + pw1).

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Scale pw2 first; beta = 0 is handled as an explicit zeroing rather
      ! than a multiplication by zero.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! Same grid object: add directly; alpha = 0 skips the addition entirely.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different grid objects: the 3d arrays must still match element-wise.
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c3d_c3d_gs
8048
8049! **************************************************************************************************
8050!> \brief pw_out = pw_out + alpha * pw1 * pw2
8051!> alpha defaults to 1
8052!> \param pw_out ...
8053!> \param pw1 ...
8054!> \param pw2 ...
8055!> \param alpha ...
8056!> \author JGH
8057! **************************************************************************************************
   SUBROUTINE pw_multiply_c3d_c3d_gs (pw_out, pw1, pw2, alpha)
      ! pw_out = pw_out + alpha * pw1 * pw2 (element-wise); alpha defaults to 1.
      ! All three fields must share the same grid object.

      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: my_alpha

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      ! NOTE(review): serial fallback for Intel LLVM compilers older than 2025,
      ! presumably a WORKSHARE code-generation issue — confirm before removing.
#if !defined(__INTEL_LLVM_COMPILER) || (20250000 <= __INTEL_LLVM_COMPILER)
      IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF
#else
      IF (my_alpha == 1.0_dp) THEN
         pw_out%array = pw_out%array + pw1%array* pw2%array
      ELSE
         pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
      END IF
#endif

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c3d_c3d_gs
8099
8100! **************************************************************************************************
8101!> \brief ...
8102!> \param pw1 ...
8103!> \param pw2 ...
8104! **************************************************************************************************
8105 SUBROUTINE pw_multiply_with_c3d_c3d_gs (pw1, pw2)
8106 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw1
8107 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
8108
8109 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
8110
8111 INTEGER :: handle
8112
8113 CALL timeset(routinen, handle)
8114
8115 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
8116 cpabort("Incompatible grids!")
8117
8118!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
8119 pw1%array = pw1%array* pw2%array
8120!$OMP END PARALLEL WORKSHARE
8121
8122 CALL timestop(handle)
8123
8124 END SUBROUTINE pw_multiply_with_c3d_c3d_gs
8125
8126! **************************************************************************************************
8127!> \brief Calculate integral over unit cell for functions in plane wave basis
8128!> only returns the real part of it ......
8129!> \param pw1 ...
8130!> \param pw2 ...
8131!> \param sumtype ...
8132!> \param just_sum ...
8133!> \param local_only ...
8134!> \return ...
8135!> \par History
8136!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
8137!> \author apsi
8138! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_c3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
      ! Real part of the cell integral of conjg(pw1)*pw2 in g-space.
      ! sumtype selects standard vs. accurate (compensated) summation;
      ! just_sum skips the volume weighting; local_only skips the
      ! reduction over the process group.

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab_c3d_c3d_gs'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Accurate summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp))

      END IF

      IF (.NOT. my_just_sum) THEN
         ! g-space convention weights by the full cell volume
         ! (contrast with dvol used by the real-space variant)
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! Sum the partial results of all ranks for distributed grids
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_c3d_gs
8193
8194
8195
8196
8197
8198
8199
8200
8201
8202
8203
8204
8205
8206! **************************************************************************************************
8207!> \brief Gathers the pw vector from a 3d data field
!> \param pw1 the 3d data field to gather from
!> \param pw2 the pw vector (1d coefficient array) to gather into
8210!> \param scale ...
8211!> \par History
8212!> none
8213!> \author JGH
8214! **************************************************************************************************
   SUBROUTINE pw_gather_s_r1d_r3d_2(pw1, pw2, scale)
      ! Convenience wrapper: gather the 1d pw coefficients pw2 from the
      ! 3d data field pw1, optionally scaling each coefficient.

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! Delegate to the array-based gather routine
      CALL pw_gather_s_r1d_r3d (pw2, pw1%array, scale)

   END SUBROUTINE pw_gather_s_r1d_r3d_2
8224
8225! **************************************************************************************************
8226!> \brief Gathers the pw vector from a 3d data field
8227!> \param pw ...
8228!> \param c ...
8229!> \param scale ...
8230!> \par History
8231!> none
8232!> \author JGH
8233! **************************************************************************************************
   SUBROUTINE pw_gather_s_r1d_r3d (pw, c, scale)
      ! Gather the 1d pw coefficient vector from the 3d data field c:
      ! for each g-point, pick the grid element addressed via the index maps.

      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the g_hat indices of each g-point into
      ! 0-based grid positions; the +1 below converts to Fortran indexing.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

         ! Two loop bodies so the scaled variant pays for the multiply only
         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = scale* c(l, m, n)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = c(l, m, n)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_r1d_r3d
8274
8275! **************************************************************************************************
8276!> \brief Scatters a pw vector to a 3d data field
!> \param pw1 the pw vector (1d coefficient array) to scatter
!> \param pw2 the 3d data field to scatter into
8279!> \param scale ...
8280!> \par History
8281!> none
8282!> \author JGH
8283! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_r3d_2(pw1, pw2, scale)
      ! Convenience wrapper: scatter the 1d pw coefficients pw1 into the
      ! 3d data field pw2, optionally scaling each coefficient.

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! Delegate to the array-based scatter routine
      CALL pw_scatter_s_r1d_r3d (pw1, pw2%array, scale)

   END SUBROUTINE pw_scatter_s_r1d_r3d_2
8293
8294! **************************************************************************************************
8295!> \brief Scatters a pw vector to a 3d data field
8296!> \param pw ...
8297!> \param c ...
8298!> \param scale ...
8299!> \par History
8300!> none
8301!> \author JGH
8302! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_r3d (pw, c, scale)
      ! Scatter the 1d pw coefficient vector into the 3d data field c:
      ! each g-point is written to the grid element addressed via the index maps.

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! First pass: positive-index maps; +1 converts to Fortran indexing.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! Half-space grids store only one of each +/-G pair: a second pass
      ! through the negative-index maps fills the mirrored grid points.
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = ( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_r1d_r3d
8375
8376
8377
8378
8379
8380
8381
8382
8383
8384
8385
8386! **************************************************************************************************
8387!> \brief Gathers the pw vector from a 3d data field
!> \param pw1 the complex 3d data field to gather from
!> \param pw2 the real pw vector (1d coefficient array) to gather into
8390!> \param scale ...
8391!> \par History
8392!> none
8393!> \author JGH
8394! **************************************************************************************************
   SUBROUTINE pw_gather_s_r1d_c3d_2(pw1, pw2, scale)
      ! Convenience wrapper: gather the real 1d pw coefficients pw2 from the
      ! complex 3d data field pw1, optionally scaling each coefficient.

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! Delegate to the array-based gather routine
      CALL pw_gather_s_r1d_c3d (pw2, pw1%array, scale)

   END SUBROUTINE pw_gather_s_r1d_c3d_2
8404
8405! **************************************************************************************************
8406!> \brief Gathers the pw vector from a 3d data field
8407!> \param pw ...
8408!> \param c ...
8409!> \param scale ...
8410!> \par History
8411!> none
8412!> \author JGH
8413! **************************************************************************************************
   SUBROUTINE pw_gather_s_r1d_c3d (pw, c, scale)
      ! Gather the real 1d pw coefficient vector from the complex 3d field c;
      ! only the real part of each addressed grid element is kept.

      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the g_hat indices of each g-point into
      ! 0-based grid positions; the +1 below converts to Fortran indexing.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = scale* real(c(l, m, n), kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = real(c(l, m, n), kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_r1d_c3d
8454
8455! **************************************************************************************************
8456!> \brief Scatters a pw vector to a 3d data field
!> \param pw1 the real pw vector (1d coefficient array) to scatter
!> \param pw2 the complex 3d data field to scatter into
8459!> \param scale ...
8460!> \par History
8461!> none
8462!> \author JGH
8463! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_c3d_2(pw1, pw2, scale)
      ! Convenience wrapper: scatter the real 1d pw coefficients pw1 into the
      ! complex 3d data field pw2, optionally scaling each coefficient.

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! Delegate to the array-based scatter routine
      CALL pw_scatter_s_r1d_c3d (pw1, pw2%array, scale)

   END SUBROUTINE pw_scatter_s_r1d_c3d_2
8473
8474! **************************************************************************************************
8475!> \brief Scatters a pw vector to a 3d data field
8476!> \param pw ...
8477!> \param c ...
8478!> \param scale ...
8479!> \par History
8480!> none
8481!> \author JGH
8482! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_c3d (pw, c, scale)
      ! Scatter the real 1d pw coefficient vector into the complex 3d field c;
      ! each coefficient is stored with zero imaginary part.

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! First pass: positive-index maps; +1 converts to Fortran indexing.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* cmplx(pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = cmplx(pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! Half-space grids store only one of each +/-G pair: a second pass
      ! through the negative-index maps fills the mirrored grid points.
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = ( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_r1d_c3d
8555
8556
8557
8558
8559
8560
8561
8562
8563
8564
8565
8566
! **************************************************************************************************
!> \brief Forward 3d-FFT wrapper: transforms real-space data (pw1, r3d) into
!>        reciprocal-space coefficients (pw2, c1d)
!> \param pw1 input: real 3d field in real space
!> \param pw2 output: complex 1d coefficient vector in reciprocal space
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_r3d_c1d_rs_gs (pw1, pw2, debug)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(3) :: nloc
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
      LOGICAL :: use_pw_gpu
#endif
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer whose name carries the grid cutoff (rounded up to a
      ! multiple of 10) so FFTs on different grid levels are timed separately
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            ! NOTE(review): "out space" is also labelled REALSPACE although a
            ! forward FFT produces reciprocal-space data - confirm against upstream
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         ! GPU path: copy/FFT/gather handled on the device
         CALL pw_gpu_r3dc1d_3d(pw1, pw2)
#elif defined (__PW_FPGA)
         ALLOCATE (c_out(n(1), n(2), n(3)))
         ! check if bitstream for the fft size is present
         ! if not, perform fft3d in CPU
         IF (pw_fpga_init_bitstream(n) == 1) THEN
            CALL pw_copy_to_array(pw1, c_out)
#if (__PW_FPGA_SP && __PW_FPGA)
            CALL pw_fpga_r3dc1d_3d_sp(n, c_out)
#else
            CALL pw_fpga_r3dc1d_3d_dp(n, c_out)
#endif
            ! rescale the FPGA result by 1/ngpts before gathering
            CALL zdscal(n(1)*n(2)*n(3), 1.0_dp/pw1%pw_grid%ngpts, c_out, 1)
            CALL pw_gather_s_c1d_c3d(pw2, c_out)
         ELSE
            CALL pw_copy_to_array(pw1, c_out)
            CALL fft3d(fwfft, n, c_out, debug=test)
            CALL pw_gather_s_c1d_c3d(pw2, c_out)
         END IF
         DEALLOCATE (c_out)
#else
         ! CPU path: copy real data into a complex scratch field,
         ! 3d FFT in place, then gather the 3d result into the 1d vector
         ALLOCATE (c_out(n(1), n(2), n(3)))
         c_out = 0.0_dp
         CALL pw_copy_to_array(pw1, c_out)
         CALL fft3d(fwfft, n, c_out, debug=test)
         CALL pw_gather_s_c1d_c3d(pw2, c_out)
         DEALLOCATE (c_out)
#endif

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         ! (no ray dist. is not efficient in CUDA)
         use_pw_gpu = pw1%pw_grid%para%ray_distribution
         IF (use_pw_gpu) THEN
            CALL pw_gpu_r3dc1d_3d_ps(pw1, pw2)
         ELSE
#endif
            !.. prepare input: local slab of the real-space data as complex
            nloc = pw1%pw_grid%npts_local
            ALLOCATE (c_in(nloc(1), nloc(2), nloc(3)))
            CALL pw_copy_to_array(pw1, c_in)
            grays = z_zero
            !..transform (distributed FFT; extra ray-distribution arguments
            ! are only passed when rays are distributed over ranks)
            IF (pw1%pw_grid%para%ray_distribution) THEN
               CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                          pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                          pw1%pw_grid%para%bo, debug=test)
            ELSE
               CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                          pw1%pw_grid%para%bo, debug=test)
            END IF
            !..prepare output: collect the 2d ray data into the 1d vector
            IF (test .AND. out_unit > 0) &
               WRITE (out_unit, '(A)') " PW_GATHER : 2d -> 1d "
            CALL pw_gather_p_c1d (pw2, grays)
            DEALLOCATE (c_in)

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         END IF
#endif
      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_r3d_c1d_rs_gs
8727
8728
8729
8730
8731
! **************************************************************************************************
!> \brief Forward 3d-FFT wrapper: transforms real-space data (pw1, r3d) into a
!>        reciprocal-space 3d field (pw2, c3d)
!> \param pw1 input: real 3d field in real space
!> \param pw2 output: complex 3d field in reciprocal space
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_r3d_c3d_rs_gs (pw1, pw2, debug)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer whose name carries the grid cutoff (rounded up to a
      ! multiple of 10) so FFTs on different grid levels are timed separately
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         ! promote the real data to complex and transform pw2%array in place
         pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
         c_out => pw2%array
         CALL fft3d(fwfft, n, c_out, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         ! NOTE(review): beyond setting up these locals no transform is done
         ! here - the r3d->c3d combination appears to be unsupported (or
         ! unused) in distributed mode; confirm before relying on it
         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_r3d_c3d_rs_gs
8832
8833
8834
8835
8836
8837
8838
8839
8840
8841
8842
! **************************************************************************************************
!> \brief Backward 3d-FFT wrapper: transforms reciprocal-space coefficients
!>        (pw1, c1d) into real-space data (pw2, r3d); only the real part of
!>        the transform is kept
!> \param pw1 input: complex 1d coefficient vector in reciprocal space
!> \param pw2 output: real 3d field in real space
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c1d_r3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(3) :: nloc
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
      LOGICAL :: use_pw_gpu
#endif
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer whose name carries the grid cutoff (rounded up to a
      ! multiple of 10) so FFTs on different grid levels are timed separately
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         ! GPU path: scatter/FFT/copy handled on the device
         CALL pw_gpu_c1dr3d_3d(pw1, pw2)
#elif defined (__PW_FPGA)
         ALLOCATE (c_out(n(1), n(2), n(3)))
         ! check if bitstream for the fft size is present
         ! if not, perform fft3d in CPU
         IF (pw_fpga_init_bitstream(n) == 1) THEN
            CALL pw_scatter_s_c1d_c3d(pw1, c_out)
            ! transform using FPGA
#if (__PW_FPGA_SP && __PW_FPGA)
            CALL pw_fpga_c1dr3d_3d_sp(n, c_out)
#else
            CALL pw_fpga_c1dr3d_3d_dp(n, c_out)
#endif
            ! NOTE(review): zdscal with factor 1.0_dp is a no-op - possibly a
            ! leftover from the forward transform's 1/ngpts rescaling; confirm
            CALL zdscal(n(1)*n(2)*n(3), 1.0_dp, c_out, 1)
            ! use real part only
            CALL pw_copy_from_array(pw2, c_out)
         ELSE
            IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
            CALL pw_scatter_s_c1d_c3d(pw1, c_out)
            ! transform
            CALL fft3d(bwfft, n, c_out, debug=test)
            ! use real part only
            IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
            CALL pw_copy_from_array(pw2, c_out)
         END IF
         DEALLOCATE (c_out)
#else
         ! CPU path: scatter the 1d coefficients into a complex 3d scratch
         ! field, backward 3d FFT in place, keep only the real part
         ALLOCATE (c_out(n(1), n(2), n(3)))
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
         CALL pw_scatter_s_c1d_c3d(pw1, c_out)
         ! transform
         CALL fft3d(bwfft, n, c_out, debug=test)
         ! use real part only
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
         CALL pw_copy_from_array(pw2, c_out)
         DEALLOCATE (c_out)
#endif

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         ! (no ray dist. is not efficient in CUDA)
         use_pw_gpu = pw1%pw_grid%para%ray_distribution
         IF (use_pw_gpu) THEN
            CALL pw_gpu_c1dr3d_3d_ps(pw1, pw2)
         ELSE
#endif
            !.. prepare input: distribute the 1d coefficients onto the rays
            IF (test .AND. out_unit > 0) &
               WRITE (out_unit, '(A)') " PW_SCATTER : 2d -> 1d "
            grays = z_zero
            CALL pw_scatter_p_c1d (pw1, grays)
            nloc = pw2%pw_grid%npts_local
            ALLOCATE (c_in(nloc(1), nloc(2), nloc(3)))
            !..transform (distributed backward FFT; extra ray-distribution
            ! arguments only when rays are distributed over ranks)
            IF (pw1%pw_grid%para%ray_distribution) THEN
               CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                          pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                          pw1%pw_grid%para%bo, debug=test)
            ELSE
               CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                          pw1%pw_grid%para%bo, debug=test)
            END IF
            !..prepare output: keep only the real part of the local slab
            IF (test .AND. out_unit > 0) &
               WRITE (out_unit, '(A)') " Real part "
            CALL pw_copy_from_array(pw2, c_in)
            DEALLOCATE (c_in)
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         END IF
#endif
      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c1d_r3d_gs_rs
9013
9014
9015
! **************************************************************************************************
!> \brief Gathers the pw coefficient vector from a 3d data field
!>        (convenience overload taking two pw objects instead of a bare array)
!> \param pw1 real 3d field (reciprocal space) supplying the data
!> \param pw2 complex 1d coefficient vector to be filled
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_r3d_2(pw1, pw2, scale)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! delegate to the array-based implementation, forwarding scale only
      ! when the caller actually supplied it
      IF (PRESENT(scale)) THEN
         CALL pw_gather_s_c1d_r3d(pw2, pw1%array, scale=scale)
      ELSE
         CALL pw_gather_s_c1d_r3d(pw2, pw1%array)
      END IF

   END SUBROUTINE pw_gather_s_c1d_r3d_2
9034
! **************************************************************************************************
!> \brief Gathers the pw vector from a 3d data field
!> \param pw complex 1d coefficient vector to be filled (reciprocal space)
!> \param c real 3d data field the coefficients are read from
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_r3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: handle, i1, i2, i3, ig
      REAL(kind=dp) :: fac

      CALL timeset(routinen, handle)

      ! uniform factor: multiplication by exactly 1.0_dp reproduces the
      ! unscaled result bit for bit, so one loop covers both call variants
      fac = 1.0_dp
      IF (PRESENT(scale)) fac = scale

      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

!$OMP PARALLEL DO PRIVATE(ig, i1, i2, i3) DEFAULT(NONE) SHARED(c, fac, pw)
         DO ig = 1, ngpts
            i1 = mapl(ghat(1, ig)) + 1
            i2 = mapm(ghat(2, ig)) + 1
            i3 = mapn(ghat(3, ig)) + 1
            pw%array(ig) = fac*cmplx(c(i1, i2, i3), 0.0_dp, kind=dp)
         END DO
!$OMP END PARALLEL DO

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_c1d_r3d
9084
! **************************************************************************************************
!> \brief Scatters a pw vector to a 3d data field
!>        (convenience overload taking two pw objects instead of a bare array)
!> \param pw1 complex 1d coefficient vector (reciprocal space) to scatter
!> \param pw2 real 3d field receiving the data
!> \param scale optional factor applied to each scattered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_r3d_2(pw1, pw2, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! delegate to the array-based implementation, forwarding scale only
      ! when the caller actually supplied it
      IF (PRESENT(scale)) THEN
         CALL pw_scatter_s_c1d_r3d(pw1, pw2%array, scale=scale)
      ELSE
         CALL pw_scatter_s_c1d_r3d(pw1, pw2%array)
      END IF

   END SUBROUTINE pw_scatter_s_c1d_r3d_2
9103
! **************************************************************************************************
!> \brief Scatters a pw vector to a 3d data field
!> \param pw complex 1d coefficient vector (reciprocal space); only its real
!>           part is scattered
!> \param c real 3d data field receiving the values
!> \param scale optional factor applied to each scattered value; when absent,
!>              c is zeroed before scattering (when present it is NOT zeroed)
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_r3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! +g half: map every stored g-vector to its (l, m, n) grid position
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         ! NOTE(review): c is only zeroed when scale is absent; with scale
         ! present the caller apparently must supply a pre-zeroed field - confirm
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* real(pw%array(gpt), kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = real(pw%array(gpt), kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! half-space storage keeps only +g: also fill the -g image positions
      ! (only the real part is written, which is unchanged under conjugation)
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*( real(pw%array(gpt), kind=dp))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = ( real(pw%array(gpt), kind=dp))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_c1d_r3d
9185
9186
9187
9188
9189
9190
9191
9192
! **************************************************************************************************
!> \brief Backward 3d-FFT wrapper: transforms reciprocal-space coefficients
!>        (pw1, c1d) into a complex real-space field (pw2, c3d)
!> \param pw1 input: complex 1d coefficient vector in reciprocal space
!> \param pw2 output: complex 3d field in real space (transformed in place)
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c1d_c3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer whose name carries the grid cutoff (rounded up to a
      ! multiple of 10) so FFTs on different grid levels are timed separately
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         ! scatter the 1d coefficients directly into pw2's 3d array and
         ! run the backward FFT in place - no scratch field needed
         c_out => pw2%array
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
         CALL pw_scatter_s_c1d_c3d(pw1, c_out)
         CALL fft3d(bwfft, n, c_out, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

         !..prepare input: distribute the 1d coefficients onto the rays
         IF (test .AND. out_unit > 0) &
            WRITE (out_unit, '(A)') " PW_SCATTER : 2d -> 1d "
         grays = z_zero
         CALL pw_scatter_p_c1d (pw1, grays)
         c_in => pw2%array
         !..transform (distributed backward FFT writing into pw2's local slab;
         ! extra ray-distribution arguments only when rays are distributed)
         IF (pw1%pw_grid%para%ray_distribution) THEN
            CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                       pw1%pw_grid%para%bo, debug=test)
         ELSE
            CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%bo, debug=test)
         END IF
         !..prepare output (nothing to do)
      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c1d_c3d_gs_rs
9310
9311
9312
! **************************************************************************************************
!> \brief Gathers the pw coefficient vector from a 3d data field
!>        (convenience overload taking two pw objects instead of a bare array)
!> \param pw1 complex 3d field (reciprocal space) supplying the data
!> \param pw2 complex 1d coefficient vector to be filled
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_c3d_2(pw1, pw2, scale)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! delegate to the array-based implementation, forwarding scale only
      ! when the caller actually supplied it
      IF (PRESENT(scale)) THEN
         CALL pw_gather_s_c1d_c3d(pw2, pw1%array, scale=scale)
      ELSE
         CALL pw_gather_s_c1d_c3d(pw2, pw1%array)
      END IF

   END SUBROUTINE pw_gather_s_c1d_c3d_2
9331
! **************************************************************************************************
!> \brief Gathers the pw vector from a 3d data field
!> \param pw complex 1d coefficient vector to be filled (reciprocal space)
!> \param c complex 3d data field the coefficients are read from
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_c3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: handle, i1, i2, i3, ig
      REAL(kind=dp) :: fac

      CALL timeset(routinen, handle)

      ! uniform factor: multiplication by exactly 1.0_dp reproduces the
      ! unscaled result bit for bit, so one loop covers both call variants
      fac = 1.0_dp
      IF (PRESENT(scale)) fac = scale

      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

!$OMP PARALLEL DO PRIVATE(ig, i1, i2, i3) DEFAULT(NONE) SHARED(c, fac, pw)
         DO ig = 1, ngpts
            i1 = mapl(ghat(1, ig)) + 1
            i2 = mapm(ghat(2, ig)) + 1
            i3 = mapn(ghat(3, ig)) + 1
            pw%array(ig) = fac*c(i1, i2, i3)
         END DO
!$OMP END PARALLEL DO

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_c1d_c3d
9381
! **************************************************************************************************
!> \brief Scatters a pw vector to a 3d data field
!>        (convenience overload taking two pw objects instead of a bare array)
!> \param pw1 complex 1d coefficient vector (reciprocal space) to scatter
!> \param pw2 complex 3d field receiving the data
!> \param scale optional factor applied to each scattered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_c3d_2(pw1, pw2, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! delegate to the array-based implementation, forwarding scale only
      ! when the caller actually supplied it
      IF (PRESENT(scale)) THEN
         CALL pw_scatter_s_c1d_c3d(pw1, pw2%array, scale=scale)
      ELSE
         CALL pw_scatter_s_c1d_c3d(pw1, pw2%array)
      END IF

   END SUBROUTINE pw_scatter_s_c1d_c3d_2
9400
! **************************************************************************************************
!> \brief Scatters a pw vector to a 3d data field
!> \param pw complex 1d coefficient vector (reciprocal space) to scatter
!> \param c complex 3d data field receiving the coefficients
!> \param scale optional factor applied to each scattered value; when absent,
!>              c is zeroed before scattering (when present it is NOT zeroed)
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_c3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! +g half: map every stored g-vector to its (l, m, n) grid position
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         ! NOTE(review): c is only zeroed when scale is absent; with scale
         ! present the caller apparently must supply a pre-zeroed field - confirm
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! half-space storage keeps only +g: fill the -g image positions with the
      ! complex conjugate, as required for data describing a real function
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_c1d_c3d
9482
9483
9484
9485
9486
9487
9488
9489
! **************************************************************************************************
!> \brief Backward 3d-FFT wrapper: transforms a reciprocal-space 3d field
!>        (pw1, c3d) into real-space data (pw2, r3d); only the real part of
!>        the transform is kept
!> \param pw1 input: complex 3d field in reciprocal space
!> \param pw2 output: real 3d field in real space
!> \param debug if .TRUE., write an FFT protocol to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_r3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer whose name carries the grid cutoff (rounded up to a
      ! multiple of 10) so FFTs on different grid levels are timed separately
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         ! out-of-place backward FFT, then keep only the real part
         c_in => pw1%array
         ALLOCATE (c_out(n(1), n(2), n(3)))
         CALL fft3d(bwfft, n, c_in, c_out, debug=test)
         ! use real part only
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
         pw2%array = real(c_out, kind=dp)
         DEALLOCATE (c_out)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         ! NOTE(review): beyond setting up these locals no transform is done
         ! here - the c3d->r3d combination appears to be unsupported (or
         ! unused) in distributed mode; confirm before relying on it
         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_r3d_gs_rs
9594
9595
9596
9597
! **************************************************************************************************
!> \brief Forward 3d FFT: complex 3d r-space data (pw1) -> complex 1d g-space coefficients (pw2).
!>        Serial path gathers from a scratch 3d array; parallel path transforms into the
!>        grid-owned 2d ray buffer (grays) and gathers from there.
!> \param pw1 complex values on the real-space grid (input)
!> \param pw2 complex coefficients on the reciprocal-space grid (output)
!> \param debug if .TRUE., an FFT protocol is printed to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c1d_rs_gs (pw1, pw2, debug)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      ! two timers: one for the routine, one labelled with the (rounded) grid cutoff
      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same grid object, or same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         c_in => pw1%array
         ALLOCATE (c_out(n(1), n(2), n(3)))
         ! transform
         CALL fft3d(fwfft, n, c_in, c_out, debug=test)
         ! gather results
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_GATHER : 3d -> 1d "
         CALL pw_gather_s_c1d_c3d(pw2, c_out)
         DEALLOCATE (c_out)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

         !..prepare input
         c_in => pw1%array
         grays = z_zero
         !..transform: the distributed FFT writes the rays owned by this rank into grays;
         !  the ray-distributed variant needs the yzp/nyzray layout tables in addition to bo
         IF (pw1%pw_grid%para%ray_distribution) THEN
            CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                       pw1%pw_grid%para%bo, debug=test)
         ELSE
            CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%bo, debug=test)
         END IF
         !..prepare output
         IF (test .AND. out_unit > 0) &
            WRITE (out_unit, '(A)') " PW_GATHER : 2d -> 1d "
         CALL pw_gather_p_c1d (pw2, grays)
      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c1d_rs_gs
9719
9720
9721
9722
9723
! **************************************************************************************************
!> \brief Forward 3d FFT: complex 3d r-space data (pw1) -> complex 3d g-space data (pw2),
!>        transforming directly into pw2's array (no gather step).
!> \param pw1 complex values on the real-space grid (input)
!> \param pw2 complex values on the reciprocal-space grid (output)
!> \param debug if .TRUE., an FFT protocol is printed to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_rs_gs (pw1, pw2, debug)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      ! two timers: one for the routine, one labelled with the (rounded) grid cutoff
      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same grid object, or same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         ! in-place style: the destination array serves as the FFT output buffer
         c_in => pw1%array
         c_out => pw2%array
         CALL fft3d(fwfft, n, c_in, c_out, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
         END IF

         ! NOTE(review): this branch only sets up my_pos/nrays/grays and performs no transform —
         ! presumably this c3d->c3d instantiation is never used with distributed grids;
         ! verify at the call sites.
         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_rs_gs
9824
! **************************************************************************************************
!> \brief Backward 3d FFT: complex 3d g-space data (pw1) -> complex 3d r-space data (pw2),
!>        transforming directly into pw2's array (no scatter step).
!> \param pw1 complex values on the reciprocal-space grid (input)
!> \param pw2 complex values on the real-space grid (output)
!> \param debug if .TRUE., an FFT protocol is printed to the default output unit
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test

      ! two timers: one for the routine, one labelled with the (rounded) grid cutoff
      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible (same grid object, or same volume element and MPI group)
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         ! in-place style: the destination array serves as the FFT output buffer
         c_in => pw1%array
         c_out => pw2%array
         CALL fft3d(bwfft, n, c_in, c_out, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
         END IF

         ! NOTE(review): this branch only sets up my_pos/nrays/grays and performs no transform —
         ! presumably this c3d->c3d instantiation is never used with distributed grids;
         ! verify at the call sites.
         my_pos = pw1%pw_grid%para%group%mepos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_gs_rs
9925
9926
9927
! **************************************************************************************************
!> \brief Scale every g-space coefficient by the Gaussian factor exp(-g**2/(4*omega**2)).
!>        Needed for the longrange Coulomb potential
!>        V(\vec r)=erf(omega*r)/r
!>        V(\vec g)=\frac{4*\pi}{g**2}*exp(-g**2/(4*omega**2))
!> \param pw g-space function, damped in place
!> \param omega range-separation parameter (must be non-negative)
!> \par History
!>      Frederick Stein (12-04-2019) created
!> \author Frederick Stein (12-Apr-2019)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_gauss_damp(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gauss_damp'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: expfac

      CALL timeset(routineN, handle)
      cpassert(omega >= 0)

      ! exponent prefactor 1/(4*omega^2)
      expfac = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw, expfac)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = pw%array(ig)*exp(-pw%pw_grid%gsq(ig)*expfac)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_gauss_damp
9967
! **************************************************************************************************
!> \brief Scale every g-space coefficient by the logarithmic derivative factor
!>        (1 + g**2/(4*omega**2)) of a Gaussian.
!> \param pw g-space function, scaled in place (array contents modified via pointer component)
!> \param omega range-separation parameter (must be non-negative)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_gauss(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_gauss'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: pref

      CALL timeset(routineN, handle)
      cpassert(omega >= 0)

      ! exponent prefactor 1/(4*omega^2)
      pref = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw, pref)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = pw%array(ig)*(1.0_dp + pref*pw%pw_grid%gsq(ig))
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_log_deriv_gauss
10000
! **************************************************************************************************
!> \brief Scale every g-space coefficient by the complementary Gaussian factor
!>        (1 - exp(-g**2/(4*omega**2))).
!>        Needed for the shortrange part of the Coulomb potential
!>        V(\vec r)=erfc(omega*r)/r
!> \param pw g-space function, damped in place
!> \param omega range-separation parameter
!> \par History
!>      Frederick Stein (12-04-2019) created
!> \author Frederick Stein (12-Apr-2019)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_compl_gauss_damp(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_compl_gauss_damp'

      INTEGER                                            :: cnt, handle, i
      REAL(KIND=dp)                                      :: omega_2, tmp, tmp2

      CALL timeset(routineN, handle)

      ! exponent prefactor 1/(4*omega^2)
      omega_2 = omega*omega
      omega_2 = 0.25_dp/omega_2

      cnt = SIZE(pw%array)

!$OMP PARALLEL DO PRIVATE(i, tmp, tmp2) DEFAULT(NONE) SHARED(cnt, pw, omega_2)
      DO i = 1, cnt
         tmp = -omega_2*pw%pw_grid%gsq(i)
         IF (abs(tmp) > 1.0e-5_dp) THEN
            tmp2 = 1.0_dp - exp(tmp)
         ELSE
            ! Taylor expansion of 1-exp(tmp) for small |tmp| to avoid cancellation:
            ! 1-exp(t) = -(t + t^2/2 + t^3/6 + ...)  (note the overall minus sign;
            ! the previous code returned exp(t)-1, i.e. the wrong sign)
            tmp2 = -tmp*(1.0_dp + 0.5_dp*tmp*(1.0_dp + tmp/3.0_dp))
         END IF
         pw%array(i) = pw%array(i)*tmp2
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_compl_gauss_damp
10046
! **************************************************************************************************
!> \brief Scale every g-space coefficient by the logarithmic derivative of the
!>        complementary Gaussian damping factor.
!> \param pw g-space function, scaled in place (array contents modified via pointer component)
!> \param omega range-separation parameter
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_compl_gauss(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_compl_gauss'

      INTEGER                                            :: handle, ig, npts
      REAL(KIND=dp)                                      :: fac, pref, t

      CALL timeset(routineN, handle)

      ! exponent prefactor 1/(4*omega^2)
      pref = 0.25_dp/(omega*omega)
      npts = SIZE(pw%array)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, t, fac) SHARED(npts, pw, pref)
      DO ig = 1, npts
         t = pref*pw%pw_grid%gsq(ig)
         IF (abs(t) >= 0.003_dp) THEN
            fac = 1.0_dp - t*exp(-t)/(1.0_dp - exp(-t))
         ELSE
            ! Taylor polynomial for small arguments prevents division by zero:
            ! 1 - t*exp(-t)/(1-exp(-t)) = t/2 - t^2/12 + O(t^4)
            fac = 0.5_dp*t - t**2/12.0_dp
         END IF
         pw%array(ig) = pw%array(ig)*fac
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_compl_gauss
10085
! **************************************************************************************************
!> \brief Scale every g-space coefficient by a mixed Coulomb/longrange factor
!>        (scale_coul + scale_long*exp(-g**2/(4*omega**2))).
!>        Needed for mixed longrange/Coulomb potential
!>        V(\vec r)=(a+b*erf(omega*r))/r
!> \param pw g-space function, scaled in place
!> \param omega range-separation parameter
!> \param scale_coul weight of the full Coulomb part
!> \param scale_long weight of the longrange (Gaussian-damped) part
!> \par History
!>      Frederick Stein (16-Dec-2021) created
!> \author Frederick Stein (16-Dec-2021)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_gauss_damp_mix(pw, omega, scale_coul, scale_long)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega, scale_coul, scale_long

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gauss_damp_mix'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: pref

      CALL timeset(routineN, handle)

      ! exponent prefactor 1/(4*omega^2)
      pref = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) SHARED(pw, pref, scale_coul, scale_long) PRIVATE(ig)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = pw%array(ig)*(scale_coul + scale_long*exp(-pw%pw_grid%gsq(ig)*pref))
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_gauss_damp_mix
10126
! **************************************************************************************************
!> \brief Scale every g-space coefficient by the logarithmic derivative of the mixed
!>        longrange/Coulomb potential factor.
!> \param pw g-space function, scaled in place (array contents modified via pointer component)
!> \param omega range-separation parameter
!> \param scale_coul weight of the full Coulomb part
!> \param scale_long weight of the longrange part
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_mix_cl(pw, omega, scale_coul, scale_long)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: omega, scale_coul, scale_long

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_mix_cl'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: pref, t

      CALL timeset(routineN, handle)

      ! exponent prefactor 1/(4*omega^2)
      pref = 0.25_dp/(omega*omega)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, t) &
!$OMP SHARED(pw, pref, scale_long, scale_coul)
      DO ig = 1, SIZE(pw%array)
         t = pref*pw%pw_grid%gsq(ig)
         pw%array(ig) = pw%array(ig)*(1.0_dp + scale_long*t*exp(-t)/(scale_coul + scale_long*exp(-t)))
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_mix_cl
10163
! **************************************************************************************************
!> \brief Multiply all data points with a complementary cosine
!>        Needed for truncated Coulomb potential
!>        V(\vec r)=1/r if r<rc, 0 else
!>        V(\vec g)=\frac{4*\pi}{g**2}*(1-cos(g*rc))
!> \param pw g-space function, scaled in place
!> \param rcutoff truncation radius rc (must be non-negative)
!> \par History
!>      Frederick Stein (07-06-2021) created
!> \author Frederick Stein (07-Jun-2021)
!> \note
!>      Multiplies by complementary cosine
! **************************************************************************************************
   SUBROUTINE pw_truncated(pw, rcutoff)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: rcutoff

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_truncated'

      INTEGER                                            :: handle, i
      REAL(KIND=dp)                                      :: tmp, tmp2

      CALL timeset(routineN, handle)
      cpassert(rcutoff >= 0)

!$OMP PARALLEL DO PRIVATE(i, tmp, tmp2) DEFAULT(NONE) SHARED(pw, rcutoff)
      DO i = 1, SIZE(pw%array)
         tmp = sqrt(pw%pw_grid%gsq(i))*rcutoff
         IF (tmp >= 0.005_dp) THEN
            tmp2 = 1.0_dp - cos(tmp)
         ELSE
            ! Taylor expansion of 1-cos(t) = t^2/2*(1 - t^2/12) + O(t^6) for small t
            ! (fixed: literal 1.0 was default real, now kind dp for consistency)
            tmp2 = tmp**2/2.0_dp*(1.0_dp - tmp**2/12.0_dp)
         END IF
         pw%array(i) = pw%array(i)*tmp2
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_truncated
10205
! **************************************************************************************************
!> \brief Scale every g-space coefficient by the logarithmic derivative of the complementary
!>        cosine. This function is needed for virials using truncated Coulomb potentials.
!> \param pw g-space function, scaled in place (array contents modified via pointer component)
!> \param rcutoff truncation radius (must be non-negative)
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_trunc(pw, rcutoff)

      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), INTENT(IN)                          :: rcutoff

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_log_deriv_trunc'

      INTEGER                                            :: handle, ig
      REAL(KIND=dp)                                      :: fac, rchalf, t

      CALL timeset(routineN, handle)
      cpassert(rcutoff >= 0)

      rchalf = 0.5_dp*rcutoff
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, t, fac) &
!$OMP SHARED(pw, rchalf)
      DO ig = 1, SIZE(pw%array)
         t = rchalf*sqrt(pw%pw_grid%gsq(ig))
         IF (abs(t) >= 0.0001_dp) THEN
            fac = 1.0_dp - t/tan(t)
         ELSE
            ! Taylor polynomial 1 - t*cot(t) = t^2/3*(1 + t^2/15) + O(t^6)
            ! prevents division by zero at |G| -> 0
            fac = t**2*(1.0_dp + t**2/15.0_dp)/3.0_dp
         END IF
         pw%array(ig) = pw%array(ig)*fac
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_trunc
10244
! **************************************************************************************************
!> \brief Calculate the derivative of a plane wave vector in place:
!>        dx^n(1) dy^n(2) dz^n(3) PW, i.e. multiply by (i*gx)^n1 (i*gy)^n2 (i*gz)^n3
!> \param pw g-space function, differentiated in place
!> \param n nonnegative derivative orders per Cartesian direction
!> \par History
!>      JGH (06-10-2002) allow only for inplace derivatives
!> \author JGH (25-Feb-2001)
!> \note
!>      Calculate the derivative dx^n(1) dy^n(2) dz^n(3) PW
! **************************************************************************************************
   SUBROUTINE pw_derive(pw, n)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      INTEGER, DIMENSION(3), INTENT(IN)                  :: n

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_derive'

      COMPLEX(KIND=dp)                                   :: im
      INTEGER                                            :: handle, m, idx, idir

      CALL timeset(routineN, handle)

      ! only nonnegative derivative orders are defined
      ! (fixed: the message previously said "Nonnegative exponents are not supported!",
      ! the inverse of the actual condition)
      IF (any(n < 0)) &
         cpabort("Negative exponents are not supported!")

      ! collect the overall i^m prefactor once; the loops below apply only the real g factors
      m = sum(n)
      im = cmplx(0.0_dp, 1.0_dp, kind=dp)**m

      DO idir = 1, 3
         IF (n(idir) == 1) THEN
            ! common case: first derivative, avoid the ** operator
!$OMP PARALLEL DO DEFAULT(NONE) SHARED(pw,idir) PRIVATE(idx)
            DO idx = 1, SIZE(pw%array)
               pw%array(idx) = pw%array(idx)*pw%pw_grid%g(idir, idx)
            END DO
!$OMP END PARALLEL DO
         ELSE IF (n(idir) > 1) THEN
!$OMP PARALLEL DO DEFAULT(NONE) SHARED(n, pw,idir) PRIVATE(idx)
            DO idx = 1, SIZE(pw%array)
               pw%array(idx) = pw%array(idx)*pw%pw_grid%g(idir, idx)**n(idir)
            END DO
!$OMP END PARALLEL DO
         END IF
      END DO

      ! im can take the values 1, -1, i, -i
      ! skip this if im == 1
      IF (abs(real(im, kind=dp) - 1.0_dp) > 1.0e-10_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(im, pw)
         pw%array(:) = im*pw%array(:)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_derive
10300
! **************************************************************************************************
!> \brief Calculate the Laplacian of a plane wave vector in place:
!>        multiply every coefficient by -|G|^2.
!> \param pw g-space function, replaced by DELTA PW
!> \par History
!>      Frederick Stein (01-02-2022) created
!> \author JGH (25-Feb-2001)
! **************************************************************************************************
   SUBROUTINE pw_laplace(pw)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_laplace'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = -pw%array(ig)*pw%pw_grid%gsq(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_laplace
10327
! **************************************************************************************************
!> \brief Calculate the tensorial 2nd derivative of a plane wave vector:
!>        diagonal components are the traceless form g_i^2 - |G|^2/3,
!>        off-diagonal components are g_i*g_j.
!> \param pw input g-space function
!> \param pwdr2 output g-space function holding the (i,j) tensor component
!> \param i first Cartesian index
!> \param j second Cartesian index
!> \author JGH (05-May-2006)
! **************************************************************************************************
   SUBROUTINE pw_dr2(pw, pwdr2, i, j)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw, pwdr2
      INTEGER, INTENT(IN)                                :: i, j

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_dr2'

      INTEGER                                            :: handle, ig, npts
      REAL(KIND=dp)                                      :: gg, third

      CALL timeset(routineN, handle)

      third = 1.0_dp/3.0_dp
      npts = SIZE(pw%array)

      IF (i == j) THEN
         ! diagonal: traceless combination
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, gg) SHARED(i, npts, pw, pwdr2, third)
         DO ig = 1, npts
            gg = pw%pw_grid%g(i, ig)**2 - third*pw%pw_grid%gsq(ig)
            pwdr2%array(ig) = gg*pw%array(ig)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(i, j, npts, pw, pwdr2)
         DO ig = 1, npts
            pwdr2%array(ig) = pw%array(ig)*(pw%pw_grid%g(i, ig)*pw%pw_grid%g(j, ig))
         END DO
!$OMP END PARALLEL DO
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_dr2
10373
! **************************************************************************************************
!> \brief Calculate the tensorial 2nd derivative of a plane wave vector
!>        and divide by |G|^2. The G=0 component pwdr2_gg(1) is set to zero.
!> \param pw input g-space function
!> \param pwdr2_gg output g-space function holding the (i,j) tensor component over |G|^2
!> \param i first Cartesian index
!> \param j second Cartesian index
!> \author RD (20-Nov-2006)
!> \note
!>      Adapted from pw_dr2
! **************************************************************************************************
   SUBROUTINE pw_dr2_gg(pw, pwdr2_gg, i, j)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw, pwdr2_gg
      INTEGER, INTENT(IN)                                :: i, j

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_dr2_gg'

      INTEGER                                            :: handle, ig, ig0, npts
      REAL(KIND=dp)                                      :: gg, third

      CALL timeset(routineN, handle)

      third = 1.0_dp/3.0_dp
      npts = SIZE(pw%array)
      ! start at the first |G| /= 0 entry so the division below is safe
      ig0 = pw%pw_grid%first_gne0

      IF (i == j) THEN
         ! diagonal: traceless combination divided by |G|^2
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, gg) SHARED(i, ig0, npts, pw, pwdr2_gg, third)
         DO ig = ig0, npts
            gg = pw%pw_grid%g(i, ig)**2 - third*pw%pw_grid%gsq(ig)
            pwdr2_gg%array(ig) = gg/pw%pw_grid%gsq(ig)*pw%array(ig)
         END DO
!$OMP END PARALLEL DO
      ELSE
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(i, ig0, j, npts, pw, pwdr2_gg)
         DO ig = ig0, npts
            pwdr2_gg%array(ig) = pw%array(ig)*((pw%pw_grid%g(i, ig)*pw%pw_grid%g(j, ig)) &
                                               /pw%pw_grid%gsq(ig))
         END DO
!$OMP END PARALLEL DO
      END IF

      ! the G=0 component is defined as zero
      IF (pw%pw_grid%have_g0) pwdr2_gg%array(1) = 0.0_dp

      CALL timestop(handle)

   END SUBROUTINE pw_dr2_gg
10423
! **************************************************************************************************
!> \brief Multiplies a G-space function with a smoothing factor of the form
!>        f(|G|) = exp((ecut - G^2)/sigma)/(1+exp((ecut - G^2)/sigma))
!> \param pw g-space function, smoothed in place
!> \param ecut energy cutoff defining the midpoint of the switching function
!> \param sigma width of the switching region
!> \author JGH (09-June-2006)
! **************************************************************************************************
   SUBROUTINE pw_smoothing(pw, ecut, sigma)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw
      REAL(KIND=dp), INTENT(IN)                          :: ecut, sigma

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_smoothing'

      INTEGER                                            :: cnt, handle, ig
      REAL(KIND=dp)                                      :: arg, f

      CALL timeset(routineN, handle)

      cnt = SIZE(pw%array)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, arg, f) SHARED(cnt, ecut, pw, sigma)
      DO ig = 1, cnt
         arg = (ecut - pw%pw_grid%gsq(ig))/sigma
         ! numerically stable logistic: the naive exp(arg)/(1+exp(arg)) overflows to
         ! Inf/Inf = NaN for large positive arg; split on the sign so exp() is only
         ! ever evaluated for a non-positive argument
         IF (arg > 0.0_dp) THEN
            f = 1.0_dp/(1.0_dp + exp(-arg))
         ELSE
            f = exp(arg)/(1.0_dp + exp(arg))
         END IF
         pw%array(ig) = f*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_smoothing
10460
! **************************************************************************************************
!> \brief Decide whether two pw grids can be used together: they are compatible when they
!>        are the same grid or one references the other.
!> \param grida first grid
!> \param gridb second grid
!> \return .TRUE. if the grids are identical or one is the reference of the other
! **************************************************************************************************
   ELEMENTAL FUNCTION pw_compatible(grida, gridb) RESULT(compat)

      TYPE(pw_grid_type), INTENT(IN)                     :: grida, gridb
      LOGICAL                                            :: compat

      compat = (grida%id_nr == gridb%id_nr) .OR. &
               (grida%reference == gridb%id_nr) .OR. &
               (gridb%reference == grida%id_nr)

   END FUNCTION pw_compatible
10483
! **************************************************************************************************
!> \brief Calculate the structure factor exp(-i*G.r) for point r on every g-vector.
!> \param sf g-space function receiving the structure factor
!> \param r Cartesian position
!> \author JGH (05-May-2006)
! **************************************************************************************************
   SUBROUTINE pw_structure_factor(sf, r)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: sf
      REAL(KIND=dp), DIMENSION(:), INTENT(IN)            :: r

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_structure_factor'

      INTEGER                                            :: handle, ig, npts
      REAL(KIND=dp)                                      :: phase

      CALL timeset(routineN, handle)

      npts = SIZE(sf%array)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, phase) SHARED(npts, r, sf)
      DO ig = 1, npts
         ! phase G.r; the factor is cos(phase) - i*sin(phase) = exp(-i*G.r)
         phase = dot_product(sf%pw_grid%g(:, ig), r)
         sf%array(ig) = cmplx(cos(phase), -sin(phase), kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_structure_factor
10517
10518 END MODULE pw_methods
static GRID_HOST_DEVICE int idx(const orbital a)
Return coset index of given orbital angular momentum.
various routines to log and control the output. The idea is that decisions about where to log should ...
integer function, public cp_logger_get_default_io_unit(logger)
returns the unit nr for the ionode (-1 on all other processors) skips as well checks if the procs cal...
integer, parameter, public bwfft
Definition fft_tools.F:146
integer, parameter, public fwfft
Definition fft_tools.F:146
sums arrays of real/complex numbers with much reduced round-off as compared to a naive implementation...
Definition kahan_sum.F:29
Defines the basic variable types.
Definition kinds.F:23
integer, parameter, public dp
Definition kinds.F:34
Machine interface based on Fortran 2003 and POSIX.
Definition machine.F:17
subroutine, public m_memory(mem)
Returns the total amount of memory [bytes] in use, if known, zero otherwise.
Definition machine.F:440
Definition of mathematical constants and functions.
complex(kind=dp), parameter, public z_zero
subroutine, public pw_copy_match(pw1, pw2)
copy a pw type variable
Definition pw_copy_all.F:42
integer function, public pw_fpga_init_bitstream(n)
Invoke the pw_fpga_check_bitstream C function passing the path to the data dir.
Definition pw_fpga.F:263
subroutine, public pw_fpga_r3dc1d_3d_dp(n, c_out)
perform an in-place double precision fft3d on the FPGA
Definition pw_fpga.F:146
subroutine, public pw_fpga_r3dc1d_3d_sp(n, c_out)
perform an in-place single precision fft3d on the FPGA
Definition pw_fpga.F:194
subroutine, public pw_fpga_c1dr3d_3d_dp(n, c_out)
perform an in-place double precision inverse fft3d on the FPGA
Definition pw_fpga.F:170
subroutine, public pw_fpga_c1dr3d_3d_sp(n, c_out)
perform an in-place single precision inverse fft3d on the FPGA
Definition pw_fpga.F:228
subroutine, public pw_gpu_c1dr3d_3d_ps(pw1, pw2)
perform an parallel scatter followed by a fft on the gpu
Definition pw_gpu.F:380
subroutine, public pw_gpu_c1dr3d_3d(pw1, pw2)
perform an scatter followed by a fft on the gpu
Definition pw_gpu.F:160
subroutine, public pw_gpu_r3dc1d_3d(pw1, pw2)
perform an fft followed by a gather on the gpu
Definition pw_gpu.F:102
subroutine, public pw_gpu_r3dc1d_3d_ps(pw1, pw2)
perform an parallel fft followed by a gather on the gpu
Definition pw_gpu.F:218
integer, parameter, public halfspace
integer, parameter, public pw_mode_local
integer, parameter, public pw_mode_distributed
subroutine, public pw_smoothing(pw, ecut, sigma)
Multiplies a G-space function with a smoothing factor of the form f(|G|) = exp((ecut - G^2)/sigma)/(1...
subroutine, public pw_log_deriv_trunc(pw, rcutoff)
Multiply all data points with the logarithmic derivative of the complementary cosine This function is...
subroutine, public pw_gauss_damp(pw, omega)
Multiply all data points with a Gaussian damping factor Needed for longrange Coulomb potential V(\vec...
subroutine, public pw_log_deriv_compl_gauss(pw, omega)
Multiply all data points with the logarithmic derivative of the complementary Gaussian damping factor...
subroutine, public pw_dr2_gg(pw, pwdr2_gg, i, j)
Calculate the tensorial 2nd derivative of a plane wave vector and divides by |G|^2....
subroutine, public pw_laplace(pw)
Calculate the Laplacian of a plane wave vector.
subroutine, public pw_log_deriv_gauss(pw, omega)
Multiply all data points with the logarithmic derivative of a Gaussian.
subroutine, public pw_gauss_damp_mix(pw, omega, scale_coul, scale_long)
Multiply all data points with a Gaussian damping factor and mixes it with the original function Neede...
subroutine, public pw_truncated(pw, rcutoff)
Multiply all data points with a complementary cosine Needed for truncated Coulomb potential V(\vec r)...
integer, parameter, public do_accurate_sum
Definition pw_methods.F:79
integer, parameter, public do_standard_sum
Definition pw_methods.F:79
subroutine, public pw_derive(pw, n)
Calculate the derivative of a plane wave vector.
subroutine, public pw_structure_factor(sf, r)
Calculate the structure factor for point r.
subroutine, public pw_compl_gauss_damp(pw, omega)
Multiply all data points with a Gaussian damping factor Needed for longrange Coulomb potential V(\vec...
subroutine, public pw_log_deriv_mix_cl(pw, omega, scale_coul, scale_long)
Multiply all data points with the logarithmic derivative of the mixed longrange/Coulomb potential Nee...
subroutine, public pw_dr2(pw, pwdr2, i, j)
Calculate the tensorial 2nd derivative of a plane wave vector.